Merge branch 'drm-next' of git://people.freedesktop.org/~airlied/linux
author     Linus Torvalds <torvalds@linux-foundation.org>
           Mon, 21 Mar 2016 20:48:00 +0000 (13:48 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Mon, 21 Mar 2016 20:48:00 +0000 (13:48 -0700)
Pull drm updates from Dave Airlie:
 "This is the main drm pull request for 4.6 kernel.

  Overall the coolest thing here for me is the nouveau Maxwell signed
  firmware support from NVidia; it's taken a long while to extract this
  from them.

  I also wish the ARM vendors had just designed one set of display IP;
  ARM display block proliferation is definitely increasing.

  Core:
     - drm_event cleanups
     - Internal API cleanup making mode_fixup optional (sketched below).
     - Apple GMUX vga switcheroo support.
     - DP AUX testing interface

  Panel:
     - Refactoring of DSI core for use over more transports.

  New driver:
     - ARM hdlcd driver

  i915:
     - FBC/PSR (framebuffer compression, panel self refresh) enabled by default.
     - Ongoing atomic display support work
     - Ongoing runtime PM work
     - Pixel clock limit checks
     - VBT DSI description support
     - GEM fixes
     - GuC firmware scheduler enhancements

  amdkfd:
     - Deferred probing fixes to avoid depending on makefile or link ordering.

  amdgpu/radeon:
     - ACP support for I2S audio.
     - Command Submission/GPU scheduler/GPUVM optimisations
     - Initial GPU reset support for amdgpu

  vmwgfx:
     - Support for DX10 gen mipmaps
     - Pageflipping and other fixes.

  exynos:
     - Exynos5420 SoC support for FIMD
     - Exynos5422 SoC support for MIPI-DSI

  nouveau:
     - GM20x secure boot support - adds acceleration for Maxwell GPUs.
     - GM200 support
     - GM20B clock driver support
     - Power sensors work

  etnaviv:
     - Correctness fixes for GPU cache flushing
     - Better support for i.MX6 systems.

  imx-drm:
     - VBlank IRQ support
     - Fence support
     - OF endpoint support

  msm:
     - HDMI support for 8996 (Snapdragon 820)
     - Adreno 430 support
     - Timestamp queries support

  virtio-gpu:
     - Fixes for Android support.

  rockchip:
     - Add support for Innosilicon HDMI

  rcar-du:
     - Support for 4 CRTCs
     - R8A7795 support
     - RCar Gen 3 support

  omapdrm:
     - HDMI interlace output support
     - dma-buf import support
     - Refactoring to remove a lot of legacy code.

  tilcdc:
     - Rewrite of pageflipping code
     - dma-buf support
     - pinctrl support

  vc4:
     - HDMI modesetting bug fixes
     - Significant 3D performance improvement.

  fsl-dcu (Freescale):
     - Lots of fixes

  tegra:
     - Two small fixes

  sti:
     - Atomic support for planes
     - Improved HDMI support"
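
For context on the "mode_fixup optional" core cleanup noted above: the mode
setting helpers now skip the callback when it is NULL, so drivers that never
adjust the requested mode can drop their stub implementations instead of
supplying one that just returns true. A minimal sketch with hypothetical
foo_* driver names (the member names are from the in-tree
drm_crtc_helper_funcs interface):

    /* No .mode_fixup stub returning true is needed any more --
     * the helpers treat a NULL callback as "mode accepted as-is". */
    static const struct drm_crtc_helper_funcs foo_crtc_helper_funcs = {
            /* .mode_fixup intentionally omitted */
            .mode_set_nofb  = foo_crtc_mode_set_nofb,
            .atomic_begin   = foo_crtc_atomic_begin,
            .atomic_flush   = foo_crtc_atomic_flush,
    };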

* 'drm-next' of git://people.freedesktop.org/~airlied/linux: (1063 commits)
  drm/amdgpu: release_pages requires linux/pagemap.h
  drm/sti: restore mode_fixup callback
  drm/amdgpu/gfx7: add MTYPE definition
  drm/amdgpu: removing BO_VAs shouldn't be interruptible
  drm/amd/powerplay: show uvd/vce power gate enablement for tonga.
  drm/amd/powerplay: show uvd/vce power gate info for fiji
  drm/amdgpu: use sched fence if possible
  drm/amdgpu: move ib.fence to job.fence
  drm/amdgpu: give a fence param to ib_free
  drm/amdgpu: include the right version of gmc header files for iceland
  drm/radeon: fix indentation.
  drm/amd/powerplay: add uvd/vce dpm enabling flag to fix the performance issue for CZ
  drm/amdgpu: switch back to 32bit hw fences v2
  drm/amdgpu: remove amdgpu_fence_is_signaled
  drm/amdgpu: drop the extra fence range check v2
  drm/amdgpu: signal fences directly in amdgpu_fence_process
  drm/amdgpu: cleanup amdgpu_fence_wait_empty v2
  drm/amdgpu: keep all fences in an RCU protected array v2
  drm/amdgpu: add number of hardware submissions to amdgpu_fence_driver_init_ring
  drm/amdgpu: RCU protected amd_sched_fence_release
  ...

14 files changed:
MAINTAINERS
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/etnaviv/etnaviv_gem.c
drivers/gpu/drm/etnaviv/etnaviv_gpu.c
drivers/gpu/drm/i915/i915_gem_userptr.c
drivers/gpu/drm/omapdrm/omap_dmm_tiler.c
drivers/gpu/drm/omapdrm/omap_gem.c
drivers/gpu/drm/sti/sti_cursor.c
drivers/gpu/drm/sti/sti_gdp.c
drivers/gpu/drm/sti/sti_hqvdp.c
drivers/gpu/drm/vc4/vc4_bo.c
drivers/gpu/host1x/job.c
drivers/staging/android/ion/ion.c
mm/swapfile.c

diff --combined MAINTAINERS
index da636eac9258ccb552d1e777b13f5a6a9ffa1377,a32d2ffd81a8939d136cf25ef2ea411284c8b453..29d9779dc3d23ed79177caddba77bee5df1a3e45
@@@ -151,7 -151,7 +151,7 @@@ S: Maintaine
  F:    drivers/scsi/53c700*
  
  6LOWPAN GENERIC (BTLE/IEEE 802.15.4)
 -M:    Alexander Aring <alex.aring@gmail.com>
 +M:    Alexander Aring <aar@pengutronix.de>
  M:    Jukka Rissanen <jukka.rissanen@linux.intel.com>
  L:    linux-bluetooth@vger.kernel.org
  L:    linux-wpan@vger.kernel.org
@@@ -238,12 -238,6 +238,12 @@@ L:       lm-sensors@lm-sensors.or
  S:    Maintained
  F:    drivers/hwmon/abituguru3.c
  
 +ACCES 104-DIO-48E GPIO DRIVER
 +M:    William Breathitt Gray <vilhelm.gray@gmail.com>
 +L:    linux-gpio@vger.kernel.org
 +S:    Maintained
 +F:    drivers/gpio/gpio-104-dio-48e.c
 +
  ACCES 104-IDI-48 GPIO DRIVER
  M:    "William Breathitt Gray" <vilhelm.gray@gmail.com>
  L:    linux-gpio@vger.kernel.org
@@@ -679,19 -673,11 +679,19 @@@ F:      drivers/gpu/drm/radeon/radeon_kfd.
  F:    drivers/gpu/drm/radeon/radeon_kfd.h
  F:    include/uapi/linux/kfd_ioctl.h
  
 +AMD SEATTLE DEVICE TREE SUPPORT
 +M:    Brijesh Singh <brijeshkumar.singh@amd.com>
 +M:    Suravee Suthikulpanit <suravee.suthikulpanit@amd.com>
 +M:    Tom Lendacky <thomas.lendacky@amd.com>
 +S:    Supported
 +F:    arch/arm64/boot/dts/amd/
 +
  AMD XGBE DRIVER
  M:    Tom Lendacky <thomas.lendacky@amd.com>
  L:    netdev@vger.kernel.org
  S:    Supported
  F:    drivers/net/ethernet/amd/xgbe/
 +F:    arch/arm64/boot/dts/amd/amd-seattle-xgbe*.dtsi
  
  AMS (Apple Motion Sensor) DRIVER
  M:    Michael Hanselmann <linux-kernel@hansmi.ch>
@@@ -783,12 -769,6 +783,12 @@@ L:       alsa-devel@alsa-project.org (moderat
  S:    Maintained
  F:    sound/aoa/
  
 +APEX EMBEDDED SYSTEMS STX104 DAC DRIVER
 +M:    William Breathitt Gray <vilhelm.gray@gmail.com>
 +L:    linux-iio@vger.kernel.org
 +S:    Maintained
 +F:    drivers/iio/dac/stx104.c
 +
  APM DRIVER
  M:    Jiri Kosina <jikos@kernel.org>
  S:    Odd fixes
@@@ -847,6 -827,12 +847,12 @@@ S:       Maintaine
  F:    drivers/net/arcnet/
  F:    include/uapi/linux/if_arcnet.h
  
+ ARM HDLCD DRM DRIVER
+ M:    Liviu Dudau <liviu.dudau@arm.com>
+ S:    Supported
+ F:    drivers/gpu/drm/arm/
+ F:    Documentation/devicetree/bindings/display/arm,hdlcd.txt
  ARM MFM AND FLOPPY DRIVERS
  M:    Ian Molton <spyro@f2s.com>
  S:    Maintained
@@@ -959,16 -945,6 +965,16 @@@ F:       arch/arm/boot/dts/alpine
  F:    arch/arm64/boot/dts/al/
  F:    drivers/*/*alpine*
  
 +ARM/ARTPEC MACHINE SUPPORT
 +M:    Jesper Nilsson <jesper.nilsson@axis.com>
 +M:    Lars Persson <lars.persson@axis.com>
 +M:    Niklas Cassel <niklas.cassel@axis.com>
 +S:    Maintained
 +L:    linux-arm-kernel@axis.com
 +F:    arch/arm/mach-artpec
 +F:    arch/arm/boot/dts/artpec6*
 +F:    drivers/clk/clk-artpec6.c
 +
  ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT
  M:    Nicolas Ferre <nicolas.ferre@atmel.com>
  M:    Alexandre Belloni <alexandre.belloni@free-electrons.com>
@@@ -1315,7 -1291,6 +1321,7 @@@ F:      arch/arm/mach-mvebu
  F:    drivers/rtc/rtc-armada38x.c
  F:    arch/arm/boot/dts/armada*
  F:    arch/arm/boot/dts/kirkwood*
 +F:    arch/arm64/boot/dts/marvell/armada*
  
  
  ARM/Marvell Berlin SoC support
@@@ -1534,7 -1509,6 +1540,7 @@@ F:      arch/arm/mach-s5p*
  F:    arch/arm/mach-exynos*/
  F:    drivers/*/*s3c2410*
  F:    drivers/*/*/*s3c2410*
 +F:    drivers/soc/samsung/*
  F:    drivers/spi/spi-s3c*
  F:    sound/soc/samsung/*
  F:    Documentation/arm/Samsung/
@@@ -1988,12 -1962,6 +1994,12 @@@ M:    Nicolas Ferre <nicolas.ferre@atmel.c
  S:    Supported
  F:    drivers/tty/serial/atmel_serial.c
  
 +ATMEL SAMA5D2 ADC DRIVER
 +M:    Ludovic Desroches <ludovic.desroches@atmel.com>
 +L:    linux-iio@vger.kernel.org
 +S:    Supported
 +F:    drivers/iio/adc/at91-sama5d2_adc.c
 +
  ATMEL Audio ALSA driver
  M:    Nicolas Ferre <nicolas.ferre@atmel.com>
  L:    alsa-devel@alsa-project.org (moderated for non-subscribers)
@@@ -2196,8 -2164,7 +2202,8 @@@ M:      Marek Lindner <mareklindner@neomailb
  M:    Simon Wunderlich <sw@simonwunderlich.de>
  M:    Antonio Quartulli <a@unstable.cc>
  L:    b.a.t.m.a.n@lists.open-mesh.org
 -W:    http://www.open-mesh.org/
 +W:    https://www.open-mesh.org/
 +Q:    https://patchwork.open-mesh.org/project/batman/list/
  S:    Maintained
  F:    net/batman-adv/
  
@@@ -2426,9 -2393,8 +2432,9 @@@ F:      arch/arm/boot/dts/bcm470
  
  BROADCOM BCM63XX ARM ARCHITECTURE
  M:    Florian Fainelli <f.fainelli@gmail.com>
 -L:    linux-arm-kernel@lists.infradead.org
 -T:    git git://github.com/broadcom/arm-bcm63xx.git
 +L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 +L:    bcm-kernel-feedback-list@broadcom.com
 +T:    git git://github.com/broadcom/stblinux.git
  S:    Maintained
  F:    arch/arm/mach-bcm/bcm63xx.c
  F:    arch/arm/include/debug/bcm63xx.S
@@@ -2462,14 -2428,12 +2468,14 @@@ F:   arch/mips/bmips/
  F:    arch/mips/include/asm/mach-bmips/*
  F:    arch/mips/kernel/*bmips*
  F:    arch/mips/boot/dts/brcm/bcm*.dts*
 +F:    drivers/irqchip/irq-bcm63*
  F:    drivers/irqchip/irq-bcm7*
  F:    drivers/irqchip/irq-brcmstb*
  F:    include/linux/bcm963xx_nvram.h
  F:    include/linux/bcm963xx_tag.h
  
  BROADCOM TG3 GIGABIT ETHERNET DRIVER
 +M:    Siva Reddy Kallam <siva.kallam@broadcom.com>
  M:    Prashant Sreedharan <prashant@broadcom.com>
  M:    Michael Chan <mchan@broadcom.com>
  L:    netdev@vger.kernel.org
@@@ -2561,13 -2525,6 +2567,13 @@@ L:    netdev@vger.kernel.or
  S:    Supported
  F:    drivers/net/ethernet/broadcom/bcmsysport.*
  
 +BROADCOM VULCAN ARM64 SOC
 +M:    Jayachandran C. <jchandra@broadcom.com>
 +L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 +L:    bcm-kernel-feedback-list@broadcom.com
 +S:    Maintained
 +F:    arch/arm64/boot/dts/broadcom/vulcan*
 +
  BROCADE BFA FC SCSI DRIVER
  M:    Anil Gurumurthy <anil.gurumurthy@qlogic.com>
  M:    Sudarsana Kalluru <sudarsana.kalluru@qlogic.com>
@@@ -3553,14 -3510,6 +3559,14 @@@ F:    include/linux/device-mapper.
  F:    include/linux/dm-*.h
  F:    include/uapi/linux/dm-*.h
  
 +DEVLINK
 +M:    Jiri Pirko <jiri@mellanox.com>
 +L:    netdev@vger.kernel.org
 +S:    Supported
 +F:    net/core/devlink.c
 +F:    include/net/devlink.h
 +F:    include/uapi/linux/devlink.h
 +
  DIALOG SEMICONDUCTOR DRIVERS
  M:    Support Opensource <support.opensource@diasemi.com>
  W:    http://www.dialog-semiconductor.com/products
@@@ -3598,6 -3547,13 +3604,6 @@@ L:     driverdev-devel@linuxdriverproject.o
  S:    Maintained
  F:    drivers/staging/dgnc/
  
 -DIGI EPCA PCI PRODUCTS
 -M:    Lidza Louina <lidza.louina@gmail.com>
 -M:    Daeseok Youn <daeseok.youn@gmail.com>
 -L:    driverdev-devel@linuxdriverproject.org
 -S:    Maintained
 -F:    drivers/staging/dgap/
 -
  DIOLAN U2C-12 I2C DRIVER
  M:    Guenter Roeck <linux@roeck-us.net>
  L:    linux-i2c@vger.kernel.org
@@@ -3754,7 -3710,7 +3760,7 @@@ F:      drivers/gpu/vga
  F:    include/drm/
  F:    include/uapi/drm/
  
- RADEON DRM DRIVERS
+ RADEON and AMDGPU DRM DRIVERS
  M:    Alex Deucher <alexander.deucher@amd.com>
  M:    Christian König <christian.koenig@amd.com>
  L:    dri-devel@lists.freedesktop.org
@@@ -3762,6 -3718,8 +3768,8 @@@ T:      git git://people.freedesktop.org/~ag
  S:    Supported
  F:    drivers/gpu/drm/radeon/
  F:    include/uapi/drm/radeon*
+ F:    drivers/gpu/drm/amd/
+ F:    include/uapi/drm/amdgpu*
  
  DRM PANEL DRIVERS
  M:    Thierry Reding <thierry.reding@gmail.com>
@@@ -3806,7 -3764,7 +3814,7 @@@ F:      include/drm/exynos
  F:    include/uapi/drm/exynos*
  
  DRM DRIVERS FOR FREESCALE DCU
- M:    Jianwei Wang <jianwei.wang.chn@gmail.com>
+ M:    Stefan Agner <stefan@agner.ch>
  M:    Alison Wang <alison.wang@freescale.com>
  L:    dri-devel@lists.freedesktop.org
  S:    Supported
@@@ -4278,6 -4236,13 +4286,6 @@@ M:     Maxim Levitsky <maximlevitsky@gmail.
  S:    Maintained
  F:    drivers/media/rc/ene_ir.*
  
 -ENHANCED ERROR HANDLING (EEH)
 -M:    Gavin Shan <shangw@linux.vnet.ibm.com>
 -L:    linuxppc-dev@lists.ozlabs.org
 -S:    Supported
 -F:    Documentation/powerpc/eeh-pci-error-recovery.txt
 -F:    arch/powerpc/kernel/eeh*.c
 -
  EPSON S1D13XXX FRAMEBUFFER DRIVER
  M:    Kristoffer Ericson <kristoffer.ericson@gmail.com>
  S:    Maintained
@@@ -4869,14 -4834,10 +4877,14 @@@ L:   linux-gpio@vger.kernel.or
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/linusw/linux-gpio.git
  S:    Maintained
  F:    Documentation/gpio/
 +F:    Documentation/ABI/testing/gpio-cdev
 +F:    Documentation/ABI/obsolete/sysfs-gpio
  F:    drivers/gpio/
  F:    include/linux/gpio/
  F:    include/linux/gpio.h
  F:    include/asm-generic/gpio.h
 +F:    include/uapi/linux/gpio.h
 +F:    tools/gpio/
  
  GRE DEMULTIPLEXER DRIVER
  M:    Dmitry Kozlov <xeb@mail.ru>
@@@ -5025,7 -4986,6 +5033,7 @@@ F:      include/linux/hw_random.
  
  HARDWARE SPINLOCK CORE
  M:    Ohad Ben-Cohen <ohad@wizery.com>
 +M:    Bjorn Andersson <bjorn.andersson@linaro.org>
  S:    Maintained
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/hwspinlock.git
  F:    Documentation/hwspinlock.txt
@@@ -5047,10 -5007,16 +5055,10 @@@ T:   git git://linuxtv.org/anttip/media_t
  S:    Maintained
  F:    drivers/media/dvb-frontends/hd29l2*
  
 -HEWLETT-PACKARD SMART2 RAID DRIVER
 -L:    iss_storagedev@hp.com
 -S:    Orphan
 -F:    Documentation/blockdev/cpqarray.txt
 -F:    drivers/block/cpqarray.*
 -
  HEWLETT-PACKARD SMART ARRAY RAID DRIVER (hpsa)
 -M:    Don Brace <don.brace@pmcs.com>
 +M:    Don Brace <don.brace@microsemi.com>
  L:    iss_storagedev@hp.com
 -L:    storagedev@pmcs.com
 +L:    esc.storagedev@microsemi.com
  L:    linux-scsi@vger.kernel.org
  S:    Supported
  F:    Documentation/scsi/hpsa.txt
@@@ -5059,9 -5025,9 +5067,9 @@@ F:      include/linux/cciss*.
  F:    include/uapi/linux/cciss*.h
  
  HEWLETT-PACKARD SMART CISS RAID DRIVER (cciss)
 -M:    Don Brace <don.brace@pmcs.com>
 +M:    Don Brace <don.brace@microsemi.com>
  L:    iss_storagedev@hp.com
 -L:    storagedev@pmcs.com
 +L:    esc.storagedev@microsemi.com
  L:    linux-scsi@vger.kernel.org
  S:    Supported
  F:    Documentation/blockdev/cciss.txt
@@@ -5246,7 -5212,6 +5254,7 @@@ F:      arch/x86/kernel/cpu/mshyperv.
  F:    drivers/hid/hid-hyperv.c
  F:    drivers/hv/
  F:    drivers/input/serio/hyperv-keyboard.c
 +F:    drivers/pci/host/pci-hyperv.c
  F:    drivers/net/hyperv/
  F:    drivers/scsi/storvsc_drv.c
  F:    drivers/video/fbdev/hyperv_fb.c
@@@ -5478,11 -5443,10 +5486,11 @@@ S:   Supporte
  F:    drivers/idle/i7300_idle.c
  
  IEEE 802.15.4 SUBSYSTEM
 -M:    Alexander Aring <alex.aring@gmail.com>
 +M:    Alexander Aring <aar@pengutronix.de>
  L:    linux-wpan@vger.kernel.org
 -W:    https://github.com/linux-wpan
 -T:    git git://github.com/linux-wpan/linux-wpan-next.git
 +W:    http://wpan.cakelab.org/
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/bluetooth/bluetooth-next.git
  S:    Maintained
  F:    net/ieee802154/
  F:    net/mac802154/
@@@ -5612,7 -5576,6 +5620,7 @@@ F:      drivers/input
  F:    include/linux/input.h
  F:    include/uapi/linux/input.h
  F:    include/linux/input/
 +F:    Documentation/devicetree/bindings/input/
  
  INPUT MULTITOUCH (MT) PROTOCOL
  M:    Henrik Rydberg <rydberg@bitmath.org>
@@@ -5807,7 -5770,6 +5815,7 @@@ S:      Supporte
  F:    include/uapi/linux/mei.h
  F:    include/linux/mei_cl_bus.h
  F:    drivers/misc/mei/*
 +F:    drivers/watchdog/mei_wdt.c
  F:    Documentation/misc-devices/mei/*
  
  INTEL MIC DRIVERS (mic)
@@@ -6110,7 -6072,7 +6118,7 @@@ S:      Maintaine
  F:    drivers/media/platform/rcar_jpu.c
  
  JSM Neo PCI based serial card
 -M:    Thadeu Lima de Souza Cascardo <cascardo@linux.vnet.ibm.com>
 +M:    Gabriel Krisman Bertazi <krisman@linux.vnet.ibm.com>
  L:    linux-serial@vger.kernel.org
  S:    Maintained
  F:    drivers/tty/serial/jsm/
@@@ -6628,10 -6590,9 +6636,10 @@@ F:    drivers/platform/x86/hp_accel.
  
  LIVE PATCHING
  M:    Josh Poimboeuf <jpoimboe@redhat.com>
 -M:    Seth Jennings <sjenning@redhat.com>
 +M:    Jessica Yu <jeyu@redhat.com>
  M:    Jiri Kosina <jikos@kernel.org>
 -M:    Vojtech Pavlik <vojtech@suse.com>
 +M:    Miroslav Benes <mbenes@suse.cz>
 +R:    Petr Mladek <pmladek@suse.com>
  S:    Maintained
  F:    kernel/livepatch/
  F:    include/linux/livepatch.h
@@@ -6642,11 -6603,6 +6650,11 @@@ F:    samples/livepatch
  L:    live-patching@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jikos/livepatching.git
  
 +LINUX KERNEL DUMP TEST MODULE (LKDTM)
 +M:    Kees Cook <keescook@chromium.org>
 +S:    Maintained
 +F:    drivers/misc/lkdtm.c
 +
  LLC (802.2)
  M:    Arnaldo Carvalho de Melo <acme@ghostprotocols.net>
  S:    Maintained
@@@ -6732,12 -6688,13 +6740,12 @@@ S:   Maintaine
  F:    arch/arm/mach-lpc32xx/
  
  LSILOGIC MPT FUSION DRIVERS (FC/SAS/SPI)
 -M:    Nagalakshmi Nandigama <nagalakshmi.nandigama@avagotech.com>
 -M:    Praveen Krishnamoorthy <praveen.krishnamoorthy@avagotech.com>
 -M:    Sreekanth Reddy <sreekanth.reddy@avagotech.com>
 -M:    Abhijit Mahajan <abhijit.mahajan@avagotech.com>
 -L:    MPT-FusionLinux.pdl@avagotech.com
 +M:    Sathya Prakash <sathya.prakash@broadcom.com>
 +M:    Chaitra P B <chaitra.basappa@broadcom.com>
 +M:    Suganath Prabu Subramani <suganath-prabu.subramani@broadcom.com>
 +L:    MPT-FusionLinux.pdl@broadcom.com
  L:    linux-scsi@vger.kernel.org
 -W:    http://www.lsilogic.com/support
 +W:    http://www.avagotech.com/support/
  S:    Supported
  F:    drivers/message/fusion/
  F:    drivers/scsi/mpt2sas/
@@@ -6960,7 -6917,7 +6968,7 @@@ MAXIM MAX77802 MULTIFUNCTION PMIC DEVIC
  M:    Javier Martinez Canillas <javier@osg.samsung.com>
  L:    linux-kernel@vger.kernel.org
  S:    Supported
 -F:    drivers/*/*max77802.c
 +F:    drivers/*/*max77802*.c
  F:    Documentation/devicetree/bindings/*/*max77802.txt
  F:    include/dt-bindings/*/*max77802.h
  
@@@ -6970,7 -6927,7 +6978,7 @@@ M:      Krzysztof Kozlowski <k.kozlowski@sam
  L:    linux-kernel@vger.kernel.org
  S:    Supported
  F:    drivers/*/max14577.c
 -F:    drivers/*/max77686.c
 +F:    drivers/*/max77686*.c
  F:    drivers/*/max77693.c
  F:    drivers/extcon/extcon-max14577.c
  F:    drivers/extcon/extcon-max77693.c
@@@ -7075,13 -7032,6 +7083,13 @@@ F:    include/uapi/linux/meye.
  F:    include/uapi/linux/ivtv*
  F:    include/uapi/linux/uvcvideo.h
  
 +MEDIATEK ETHERNET DRIVER
 +M:    Felix Fietkau <nbd@openwrt.org>
 +M:    John Crispin <blogic@openwrt.org>
 +L:    netdev@vger.kernel.org
 +S:    Maintained
 +F:    drivers/net/ethernet/mediatek/
 +
  MEDIATEK MT7601U WIRELESS LAN DRIVER
  M:    Jakub Kicinski <kubakici@wp.pl>
  L:    linux-wireless@vger.kernel.org
@@@ -7293,8 -7243,10 +7301,8 @@@ L:     linux-media@vger.kernel.or
  W:    https://linuxtv.org
  W:    http://palosaari.fi/linux/
  Q:    http://patchwork.linuxtv.org/project/linux-media/list/
 -T:    git git://linuxtv.org/anttip/media_tree.git
  S:    Maintained
 -F:    drivers/staging/media/mn88473/
 -F:    drivers/media/dvb-frontends/mn88473.h
 +F:    drivers/media/dvb-frontends/mn88473*
  
  MODULE SUPPORT
  M:    Rusty Russell <rusty@rustcorp.com.au>
@@@ -7455,17 -7407,6 +7463,17 @@@ W:    https://www.myricom.com/support/down
  S:    Supported
  F:    drivers/net/ethernet/myricom/myri10ge/
  
 +NAND FLASH SUBSYSTEM
 +M:    Boris Brezillon <boris.brezillon@free-electrons.com>
 +R:    Richard Weinberger <richard@nod.at>
 +L:    linux-mtd@lists.infradead.org
 +W:    http://www.linux-mtd.infradead.org/
 +Q:    http://patchwork.ozlabs.org/project/linux-mtd/list/
 +T:    git git://github.com/linux-nand/linux.git
 +S:    Maintained
 +F:    drivers/mtd/nand/
 +F:    include/linux/mtd/nand*.h
 +
  NATSEMI ETHERNET DRIVER (DP8381x)
  S:    Orphan
  F:    drivers/net/ethernet/natsemi/natsemi.c
@@@ -7579,6 -7520,7 +7587,6 @@@ F:      net/netrom
  
  NETRONOME ETHERNET DRIVERS
  M:    Jakub Kicinski <jakub.kicinski@netronome.com>
 -M:    Rolf Neugebauer <rolf.neugebauer@netronome.com>
  L:    oss-drivers@netronome.com
  S:    Maintained
  F:    drivers/net/ethernet/netronome/
@@@ -7715,6 -7657,7 +7723,6 @@@ F:      net/nfc
  F:    include/net/nfc/
  F:    include/uapi/linux/nfc.h
  F:    drivers/nfc/
 -F:    include/linux/platform_data/microread.h
  F:    include/linux/platform_data/nfcmrvl.h
  F:    include/linux/platform_data/nxp-nci.h
  F:    include/linux/platform_data/pn544.h
@@@ -7865,11 -7808,6 +7873,11 @@@ L:    alsa-devel@alsa-project.org (moderat
  S:    Maintained
  F:    sound/soc/codecs/tfa9879*
  
 +OBJTOOL
 +M:    Josh Poimboeuf <jpoimboe@redhat.com>
 +S:    Supported
 +F:    tools/objtool/
 +
  OMAP SUPPORT
  M:    Tony Lindgren <tony@atomide.com>
  L:    linux-omap@vger.kernel.org
@@@ -7920,7 -7858,7 +7928,7 @@@ S:      Maintaine
  F:    arch/arm/*omap*/*clock*
  
  OMAP POWER MANAGEMENT SUPPORT
 -M:    Kevin Hilman <khilman@deeprootsystems.com>
 +M:    Kevin Hilman <khilman@kernel.org>
  L:    linux-omap@vger.kernel.org
  S:    Maintained
  F:    arch/arm/*omap*/*pm*
@@@ -8024,7 -7962,7 +8032,7 @@@ F:      arch/arm/*omap*/usb
  OMAP GPIO DRIVER
  M:    Grygorii Strashko <grygorii.strashko@ti.com>
  M:    Santosh Shilimkar <ssantosh@kernel.org>
 -M:    Kevin Hilman <khilman@deeprootsystems.com>
 +M:    Kevin Hilman <khilman@kernel.org>
  L:    linux-omap@vger.kernel.org
  S:    Maintained
  F:    Documentation/devicetree/bindings/gpio/gpio-omap.txt
@@@ -8243,13 -8181,6 +8251,13 @@@ S:    Maintaine
  F:    Documentation/mn10300/
  F:    arch/mn10300/
  
 +PARALLEL LCD/KEYPAD PANEL DRIVER
 +M:      Willy Tarreau <willy@haproxy.com>
 +M:      Ksenija Stanojevic <ksenija.stanojevic@gmail.com>
 +S:      Odd Fixes
 +F:      Documentation/misc-devices/lcd-panel-cgram.txt
 +F:      drivers/misc/panel.c
 +
  PARALLEL PORT SUBSYSTEM
  M:    Sudip Mukherjee <sudipm.mukherjee@gmail.com>
  M:    Sudip Mukherjee <sudip@vectorindia.org>
@@@ -8341,15 -8272,6 +8349,15 @@@ L:    linux-pci@vger.kernel.or
  S:    Supported
  F:    Documentation/PCI/pci-error-recovery.txt
  
 +PCI ENHANCED ERROR HANDLING (EEH) FOR POWERPC
 +M:    Russell Currey <ruscur@russell.cc>
 +L:    linuxppc-dev@lists.ozlabs.org
 +S:    Supported
 +F:    Documentation/powerpc/eeh-pci-error-recovery.txt
 +F:    arch/powerpc/kernel/eeh*.c
 +F:    arch/powerpc/platforms/*/eeh*.c
 +F:    arch/powerpc/include/*/eeh*.h
 +
  PCI SUBSYSTEM
  M:    Bjorn Helgaas <bhelgaas@google.com>
  L:    linux-pci@vger.kernel.org
@@@ -8457,20 -8379,12 +8465,20 @@@ L:   linux-pci@vger.kernel.or
  S:    Maintained
  F:    drivers/pci/host/*designware*
  
 +PCI DRIVER FOR SYNOPSYS PROTOTYPING DEVICE
 +M:    Joao Pinto <jpinto@synopsys.com>
 +L:    linux-pci@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/pci/designware-pcie.txt
 +F:    drivers/pci/host/pcie-designware-plat.c
 +
  PCI DRIVER FOR GENERIC OF HOSTS
  M:    Will Deacon <will.deacon@arm.com>
  L:    linux-pci@vger.kernel.org
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
  F:    Documentation/devicetree/bindings/pci/host-generic-pci.txt
 +F:    drivers/pci/host/pci-host-common.c
  F:    drivers/pci/host/pci-host-generic.c
  
  PCI DRIVER FOR INTEL VOLUME MANAGEMENT DEVICE (VMD)
@@@ -8516,14 -8430,6 +8524,14 @@@ L:     linux-arm-msm@vger.kernel.or
  S:     Maintained
  F:     drivers/pci/host/*qcom*
  
 +PCIE DRIVER FOR CAVIUM THUNDERX
 +M:    David Daney <david.daney@cavium.com>
 +L:    linux-pci@vger.kernel.org
 +L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 +S:    Supported
 +F:    Documentation/devicetree/bindings/pci/pci-thunder-*
 +F:    drivers/pci/host/pci-thunder-*
 +
  PCMCIA SUBSYSTEM
  P:    Linux PCMCIA Team
  L:    linux-pcmcia@lists.infradead.org
@@@ -8549,7 -8455,7 +8557,7 @@@ F:      include/crypto/pcrypt.
  
  PER-CPU MEMORY ALLOCATOR
  M:    Tejun Heo <tj@kernel.org>
 -M:    Christoph Lameter <cl@linux-foundation.org>
 +M:    Christoph Lameter <cl@linux.com>
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tj/percpu.git
  S:    Maintained
  F:    include/linux/percpu*.h
@@@ -8566,7 -8472,6 +8574,7 @@@ PERFORMANCE EVENTS SUBSYSTE
  M:    Peter Zijlstra <peterz@infradead.org>
  M:    Ingo Molnar <mingo@redhat.com>
  M:    Arnaldo Carvalho de Melo <acme@kernel.org>
 +R:    Alexander Shishkin <alexander.shishkin@linux.intel.com>
  L:    linux-kernel@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git perf/core
  S:    Supported
@@@ -9189,14 -9094,10 +9197,14 @@@ S:   Maintaine
  F:    drivers/net/ethernet/rdc/r6040.c
  
  RDS - RELIABLE DATAGRAM SOCKETS
 -M:    Chien Yen <chien.yen@oracle.com>
 +M:    Santosh Shilimkar <santosh.shilimkar@oracle.com>
 +L:    netdev@vger.kernel.org
 +L:    linux-rdma@vger.kernel.org
  L:    rds-devel@oss.oracle.com (moderated for non-subscribers)
 +W:    https://oss.oracle.com/projects/rds/
  S:    Supported
  F:    net/rds/
 +F:    Documentation/networking/rds.txt
  
  READ-COPY UPDATE (RCU)
  M:    "Paul E. McKenney" <paulmck@linux.vnet.ibm.com>
@@@ -9249,7 -9150,6 +9257,7 @@@ F:      include/linux/regmap.
  
  REMOTE PROCESSOR (REMOTEPROC) SUBSYSTEM
  M:    Ohad Ben-Cohen <ohad@wizery.com>
 +M:    Bjorn Andersson <bjorn.andersson@linaro.org>
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/remoteproc.git
  S:    Maintained
  F:    drivers/remoteproc/
@@@ -9258,7 -9158,6 +9266,7 @@@ F:      include/linux/remoteproc.
  
  REMOTE PROCESSOR MESSAGING (RPMSG) SUBSYSTEM
  M:    Ohad Ben-Cohen <ohad@wizery.com>
 +M:    Bjorn Andersson <bjorn.andersson@linaro.org>
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/ohad/rpmsg.git
  S:    Maintained
  F:    drivers/rpmsg/
@@@ -9597,7 -9496,6 +9605,7 @@@ F:      drivers/media/i2c/s5k5baf.
  
  SAMSUNG S3FWRN5 NFC DRIVER
  M:    Robert Baldyga <r.baldyga@samsung.com>
 +M:    Krzysztof Opasiak <k.opasiak@samsung.com>
  L:    linux-nfc@lists.01.org (moderated for non-subscribers)
  S:    Supported
  F:    drivers/nfc/s3fwrn5
@@@ -9771,7 -9669,7 +9779,7 @@@ F:      drivers/scsi/sg.
  F:    include/scsi/sg.h
  
  SCSI SUBSYSTEM
 -M:    "James E.J. Bottomley" <JBottomley@odin.com>
 +M:    "James E.J. Bottomley" <jejb@linux.vnet.ibm.com>
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/jejb/scsi.git
  M:    "Martin K. Petersen" <martin.petersen@oracle.com>
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/mkp/scsi.git
@@@ -10081,7 -9979,7 +10089,7 @@@ F:    arch/arm/mach-s3c24xx/bast-irq.
  
  TI DAVINCI MACHINE SUPPORT
  M:    Sekhar Nori <nsekhar@ti.com>
 -M:    Kevin Hilman <khilman@deeprootsystems.com>
 +M:    Kevin Hilman <khilman@kernel.org>
  T:    git git://gitorious.org/linux-davinci/linux-davinci.git
  Q:    http://patchwork.kernel.org/project/linux-davinci/list/
  S:    Supported
@@@ -10517,6 -10415,19 +10525,6 @@@ L:  linux-tegra@vger.kernel.or
  S:    Maintained
  F:    drivers/staging/nvec/
  
 -STAGING - OLPC SECONDARY DISPLAY CONTROLLER (DCON)
 -M:    Jens Frederich <jfrederich@gmail.com>
 -M:    Daniel Drake <dsd@laptop.org>
 -M:    Jon Nettleton <jon.nettleton@gmail.com>
 -W:    http://wiki.laptop.org/go/DCON
 -S:    Maintained
 -F:    drivers/staging/olpc_dcon/
 -
 -STAGING - PARALLEL LCD/KEYPAD PANEL DRIVER
 -M:    Willy Tarreau <willy@meta-x.org>
 -S:    Odd Fixes
 -F:    drivers/staging/panel/
 -
  STAGING - REALTEK RTL8712U DRIVERS
  M:    Larry Finger <Larry.Finger@lwfinger.net>
  M:    Florian Schilhabel <florian.c.schilhabel@googlemail.com>.
@@@ -11196,8 -11107,8 +11204,8 @@@ M:   Jarkko Sakkinen <jarkko.sakkinen@lin
  R:    Jason Gunthorpe <jgunthorpe@obsidianresearch.com>
  W:    http://tpmdd.sourceforge.net
  L:    tpmdd-devel@lists.sourceforge.net (moderated for non-subscribers)
 -Q:    git git://github.com/PeterHuewe/linux-tpmdd.git
 -T:    git https://github.com/PeterHuewe/linux-tpmdd
 +Q:    https://patchwork.kernel.org/project/tpmdd-devel/list/
 +T:    git git://git.infradead.org/users/jjs/linux-tpmdd.git
  S:    Maintained
  F:    drivers/char/tpm/
  
@@@ -11352,6 -11263,7 +11360,6 @@@ F:   include/linux/cdrom.
  F:    include/uapi/linux/cdrom.h
  
  UNISYS S-PAR DRIVERS
 -M:    Benjamin Romer <benjamin.romer@unisys.com>
  M:    David Kershner <david.kershner@unisys.com>
  L:    sparmaintainer@unisys.com (Unisys internal)
  S:    Supported
@@@ -11376,7 -11288,7 +11384,7 @@@ F:   include/linux/mtd/ubi.
  F:    include/uapi/mtd/ubi-user.h
  
  USB ACM DRIVER
 -M:    Oliver Neukum <oliver@neukum.org>
 +M:    Oliver Neukum <oneukum@suse.com>
  L:    linux-usb@vger.kernel.org
  S:    Maintained
  F:    Documentation/usb/acm.txt
@@@ -11461,13 -11373,6 +11469,13 @@@ S: Maintaine
  F:    drivers/usb/host/isp116x*
  F:    include/linux/usb/isp116x.h
  
 +USB LAN78XX ETHERNET DRIVER
 +M:    Woojung Huh <woojung.huh@microchip.com>
 +M:    Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
 +L:    netdev@vger.kernel.org
 +S:    Maintained
 +F:    drivers/net/usb/lan78xx.*
 +
  USB MASS STORAGE DRIVER
  M:    Matthew Dharm <mdharm-usb@one-eyed-alien.net>
  L:    linux-usb@vger.kernel.org
@@@ -11507,7 -11412,6 +11515,7 @@@ M:   Valentina Manea <valentina.manea.m@g
  M:    Shuah Khan <shuah.kh@samsung.com>
  L:    linux-usb@vger.kernel.org
  S:    Maintained
 +F:    Documentation/usb/usbip_protocol.txt
  F:    drivers/usb/usbip/
  F:    tools/usb/usbip/
  
@@@ -11998,18 -11902,6 +12006,18 @@@ M: David Härdeman <david@hardeman.nu
  S:    Maintained
  F:    drivers/media/rc/winbond-cir.c
  
 +WINSYSTEMS EBC-C384 WATCHDOG DRIVER
 +M:    William Breathitt Gray <vilhelm.gray@gmail.com>
 +L:    linux-watchdog@vger.kernel.org
 +S:    Maintained
 +F:    drivers/watchdog/ebc-c384_wdt.c
 +
 +WINSYSTEMS WS16C48 GPIO DRIVER
 +M:    William Breathitt Gray <vilhelm.gray@gmail.com>
 +L:    linux-gpio@vger.kernel.org
 +S:    Maintained
 +F:    drivers/gpio/gpio-ws16c48.c
 +
  WIMAX STACK
  M:    Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>
  M:    linux-wimax@intel.com
index 7b82e57aa09cbe2884ec382bd680c03fb1d9a85d,0f42b1a24446e05b4c920ea578655ea871f0844d..ab34190859a8d709b54bbfe6bb63178a25c24fe0
@@@ -77,6 -77,8 +77,8 @@@ static void amdgpu_ttm_mem_global_relea
  static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
  {
        struct drm_global_reference *global_ref;
+       struct amdgpu_ring *ring;
+       struct amd_sched_rq *rq;
        int r;
  
        adev->mman.mem_global_referenced = false;
                return r;
        }
  
+       ring = adev->mman.buffer_funcs_ring;
+       rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
+       r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
+                                 rq, amdgpu_sched_jobs);
+       if (r != 0) {
+               DRM_ERROR("Failed setting up TTM BO move run queue.\n");
+               drm_global_item_unref(&adev->mman.mem_global_ref);
+               drm_global_item_unref(&adev->mman.bo_global_ref.ref);
+               return r;
+       }
        adev->mman.mem_global_referenced = true;
        return 0;
  }
  
  static void amdgpu_ttm_global_fini(struct amdgpu_device *adev)
  {
        if (adev->mman.mem_global_referenced) {
+               amd_sched_entity_fini(adev->mman.entity.sched,
+                                     &adev->mman.entity);
                drm_global_item_unref(&adev->mman.bo_global_ref.ref);
                drm_global_item_unref(&adev->mman.mem_global_ref);
                adev->mman.mem_global_referenced = false;
@@@ -478,32 -494,32 +494,32 @@@ static void amdgpu_ttm_io_mem_free(stru
  /*
   * TTM backend functions.
   */
+ struct amdgpu_ttm_gup_task_list {
+       struct list_head        list;
+       struct task_struct      *task;
+ };
  struct amdgpu_ttm_tt {
-       struct ttm_dma_tt               ttm;
-       struct amdgpu_device            *adev;
-       u64                             offset;
-       uint64_t                        userptr;
-       struct mm_struct                *usermm;
-       uint32_t                        userflags;
+       struct ttm_dma_tt       ttm;
+       struct amdgpu_device    *adev;
+       u64                     offset;
+       uint64_t                userptr;
+       struct mm_struct        *usermm;
+       uint32_t                userflags;
+       spinlock_t              guptasklock;
+       struct list_head        guptasks;
+       atomic_t                mmu_invalidations;
  };
  
- /* prepare the sg table with the user pages */
- static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
+ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
  {
-       struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
-       unsigned pinned = 0, nents;
-       int r;
        int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
-       enum dma_data_direction direction = write ?
-               DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
-       if (current->mm != gtt->usermm)
-               return -EPERM;
+       unsigned pinned = 0;
+       int r;
  
        if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
-               /* check that we only pin down anonymous memory
+               /* check that we only use anonymous memory
                   to prevent problems with writeback */
                unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
                struct vm_area_struct *vma;
        do {
                unsigned num_pages = ttm->num_pages - pinned;
                uint64_t userptr = gtt->userptr + pinned * PAGE_SIZE;
-               struct page **pages = ttm->pages + pinned;
+               struct page **p = pages + pinned;
+               struct amdgpu_ttm_gup_task_list guptask;
+               guptask.task = current;
+               spin_lock(&gtt->guptasklock);
+               list_add(&guptask.list, &gtt->guptasks);
+               spin_unlock(&gtt->guptasklock);
 -              r = get_user_pages(current, current->mm, userptr, num_pages,
 -                                 write, 0, p, NULL);
++              r = get_user_pages(userptr, num_pages, write, 0, p, NULL);
+               spin_lock(&gtt->guptasklock);
+               list_del(&guptask.list);
+               spin_unlock(&gtt->guptasklock);
  
-               r = get_user_pages(userptr, num_pages, write, 0, pages, NULL);
                if (r < 0)
                        goto release_pages;
  
  
        } while (pinned < ttm->num_pages);
  
+       return 0;
+ release_pages:
+       release_pages(pages, pinned, 0);
+       return r;
+ }
+ /* prepare the sg table with the user pages */
+ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm)
+ {
+       struct amdgpu_device *adev = amdgpu_get_adev(ttm->bdev);
+       struct amdgpu_ttm_tt *gtt = (void *)ttm;
+       unsigned nents;
+       int r;
+       int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
+       enum dma_data_direction direction = write ?
+               DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
        r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
                                      ttm->num_pages << PAGE_SHIFT,
                                      GFP_KERNEL);
  
  release_sg:
        kfree(ttm->sg);
- release_pages:
-       release_pages(ttm->pages, pinned, 0);
        return r;
  }
  
@@@ -769,38 -813,61 +812,61 @@@ int amdgpu_ttm_tt_set_userptr(struct tt
        gtt->userptr = addr;
        gtt->usermm = current->mm;
        gtt->userflags = flags;
+       spin_lock_init(&gtt->guptasklock);
+       INIT_LIST_HEAD(&gtt->guptasks);
+       atomic_set(&gtt->mmu_invalidations, 0);
        return 0;
  }
  
- bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm)
+ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
  {
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
  
        if (gtt == NULL)
-               return false;
+               return NULL;
  
-       return !!gtt->userptr;
+       return gtt->usermm;
  }
  
  bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
                                  unsigned long end)
  {
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
+       struct amdgpu_ttm_gup_task_list *entry;
        unsigned long size;
  
-       if (gtt == NULL)
-               return false;
-       if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr)
+       if (gtt == NULL || !gtt->userptr)
                return false;
  
        size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
        if (gtt->userptr > end || gtt->userptr + size <= start)
                return false;
  
+       spin_lock(&gtt->guptasklock);
+       list_for_each_entry(entry, &gtt->guptasks, list) {
+               if (entry->task == current) {
+                       spin_unlock(&gtt->guptasklock);
+                       return false;
+               }
+       }
+       spin_unlock(&gtt->guptasklock);
+       atomic_inc(&gtt->mmu_invalidations);
        return true;
  }
  
+ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
+                                      int *last_invalidated)
+ {
+       struct amdgpu_ttm_tt *gtt = (void *)ttm;
+       int prev_invalidated = *last_invalidated;
+       *last_invalidated = atomic_read(&gtt->mmu_invalidations);
+       return prev_invalidated != *last_invalidated;
+ }
  bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
  {
        struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@@ -1014,9 -1081,10 +1080,10 @@@ int amdgpu_copy_buffer(struct amdgpu_ri
                       struct fence **fence)
  {
        struct amdgpu_device *adev = ring->adev;
+       struct amdgpu_job *job;
        uint32_t max_bytes;
        unsigned num_loops, num_dw;
-       struct amdgpu_ib *ib;
        unsigned i;
        int r;
  
        while (num_dw & 0x7)
                num_dw++;
  
-       ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
-       if (!ib)
-               return -ENOMEM;
-       r = amdgpu_ib_get(ring, NULL, num_dw * 4, ib);
-       if (r) {
-               kfree(ib);
+       r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, &job);
+       if (r)
                return r;
-       }
-       ib->length_dw = 0;
  
        if (resv) {
-               r = amdgpu_sync_resv(adev, &ib->sync, resv,
+               r = amdgpu_sync_resv(adev, &job->sync, resv,
                                     AMDGPU_FENCE_OWNER_UNDEFINED);
                if (r) {
                        DRM_ERROR("sync failed (%d).\n", r);
        for (i = 0; i < num_loops; i++) {
                uint32_t cur_size_in_bytes = min(byte_count, max_bytes);
  
-               amdgpu_emit_copy_buffer(adev, ib, src_offset, dst_offset,
-                                       cur_size_in_bytes);
+               amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_offset,
+                                       dst_offset, cur_size_in_bytes);
  
                src_offset += cur_size_in_bytes;
                dst_offset += cur_size_in_bytes;
                byte_count -= cur_size_in_bytes;
        }
  
-       amdgpu_vm_pad_ib(adev, ib);
-       WARN_ON(ib->length_dw > num_dw);
-       r = amdgpu_sched_ib_submit_kernel_helper(adev, ring, ib, 1,
-                                                &amdgpu_vm_free_job,
-                                                AMDGPU_FENCE_OWNER_UNDEFINED,
-                                                fence);
+       amdgpu_ring_pad_ib(ring, &job->ibs[0]);
+       WARN_ON(job->ibs[0].length_dw > num_dw);
+       r = amdgpu_job_submit(job, ring, &adev->mman.entity,
+                             AMDGPU_FENCE_OWNER_UNDEFINED, fence);
        if (r)
                goto error_free;
  
-       if (!amdgpu_enable_scheduler) {
-               amdgpu_ib_free(adev, ib);
-               kfree(ib);
-       }
        return 0;
  error_free:
-       amdgpu_ib_free(adev, ib);
-       kfree(ib);
+       amdgpu_job_free(job);
        return r;
  }
  
index 97d4457be8d260dcac159afff7758b4cbfa9551d,937a77520f58142ea7c65b12d64be1ac5137db05..281c6eca20a85524cc34b615f5bc5d0567b4d7ba
@@@ -260,8 -260,32 +260,32 @@@ etnaviv_gem_get_vram_mapping(struct etn
        return NULL;
  }
  
- int etnaviv_gem_get_iova(struct etnaviv_gpu *gpu,
-       struct drm_gem_object *obj, u32 *iova)
+ void etnaviv_gem_mapping_reference(struct etnaviv_vram_mapping *mapping)
+ {
+       struct etnaviv_gem_object *etnaviv_obj = mapping->object;
+       drm_gem_object_reference(&etnaviv_obj->base);
+       mutex_lock(&etnaviv_obj->lock);
+       WARN_ON(mapping->use == 0);
+       mapping->use += 1;
+       mutex_unlock(&etnaviv_obj->lock);
+ }
+ void etnaviv_gem_mapping_unreference(struct etnaviv_vram_mapping *mapping)
+ {
+       struct etnaviv_gem_object *etnaviv_obj = mapping->object;
+       mutex_lock(&etnaviv_obj->lock);
+       WARN_ON(mapping->use == 0);
+       mapping->use -= 1;
+       mutex_unlock(&etnaviv_obj->lock);
+       drm_gem_object_unreference_unlocked(&etnaviv_obj->base);
+ }
+ struct etnaviv_vram_mapping *etnaviv_gem_mapping_get(
+       struct drm_gem_object *obj, struct etnaviv_gpu *gpu)
  {
        struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
        struct etnaviv_vram_mapping *mapping;
  out:
        mutex_unlock(&etnaviv_obj->lock);
  
-       if (!ret) {
-               /* Take a reference on the object */
-               drm_gem_object_reference(obj);
-               *iova = mapping->iova;
-       }
-       return ret;
- }
- void etnaviv_gem_put_iova(struct etnaviv_gpu *gpu, struct drm_gem_object *obj)
- {
-       struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
-       struct etnaviv_vram_mapping *mapping;
-       mutex_lock(&etnaviv_obj->lock);
-       mapping = etnaviv_gem_get_vram_mapping(etnaviv_obj, gpu->mmu);
-       WARN_ON(mapping->use == 0);
-       mapping->use -= 1;
-       mutex_unlock(&etnaviv_obj->lock);
+       if (ret)
+               return ERR_PTR(ret);
  
-       drm_gem_object_unreference_unlocked(obj);
+       /* Take a reference on the object */
+       drm_gem_object_reference(obj);
+       return mapping;
  }
  
  void *etnaviv_gem_vmap(struct drm_gem_object *obj)
@@@ -753,9 -761,9 +761,9 @@@ static struct page **etnaviv_gem_userpt
  
        down_read(&mm->mmap_sem);
        while (pinned < npages) {
 -              ret = get_user_pages(task, mm, ptr, npages - pinned,
 -                                   !etnaviv_obj->userptr.ro, 0,
 -                                   pvec + pinned, NULL);
 +              ret = get_user_pages_remote(task, mm, ptr, npages - pinned,
 +                                          !etnaviv_obj->userptr.ro, 0,
 +                                          pvec + pinned, NULL);
                if (ret < 0)
                        break;
  
index 3c1ce44483d991416fed77f8945dc6686f3a3341,d13303ce530db3f45453e46ca25146ab51c09a64..09198d0b58140c8d00499588d641c7116011bc77
@@@ -628,6 -628,7 +628,7 @@@ int etnaviv_gpu_init(struct etnaviv_gp
        /* Now program the hardware */
        mutex_lock(&gpu->lock);
        etnaviv_gpu_hw_init(gpu);
+       gpu->exec_state = -1;
        mutex_unlock(&gpu->lock);
  
        pm_runtime_mark_last_busy(gpu->dev);
@@@ -871,17 -872,13 +872,13 @@@ static void recover_worker(struct work_
                gpu->event[i].fence = NULL;
                gpu->event[i].used = false;
                complete(&gpu->event_free);
-               /*
-                * Decrement the PM count for each stuck event. This is safe
-                * even in atomic context as we use ASYNC RPM here.
-                */
-               pm_runtime_put_autosuspend(gpu->dev);
        }
        spin_unlock_irqrestore(&gpu->event_spinlock, flags);
        gpu->completed_fence = gpu->active_fence;
  
        etnaviv_gpu_hw_init(gpu);
        gpu->switch_context = true;
+       gpu->exec_state = -1;
  
        mutex_unlock(&gpu->lock);
        pm_runtime_mark_last_busy(gpu->dev);
@@@ -1106,15 -1103,15 +1103,15 @@@ struct etnaviv_cmdbuf *etnaviv_gpu_cmdb
        size_t nr_bos)
  {
        struct etnaviv_cmdbuf *cmdbuf;
-       size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo[0]),
+       size_t sz = size_vstruct(nr_bos, sizeof(cmdbuf->bo_map[0]),
                                 sizeof(*cmdbuf));
  
        cmdbuf = kzalloc(sz, GFP_KERNEL);
        if (!cmdbuf)
                return NULL;
  
 -      cmdbuf->vaddr = dma_alloc_writecombine(gpu->dev, size, &cmdbuf->paddr,
 -                                             GFP_KERNEL);
 +      cmdbuf->vaddr = dma_alloc_wc(gpu->dev, size, &cmdbuf->paddr,
 +                                   GFP_KERNEL);
        if (!cmdbuf->vaddr) {
                kfree(cmdbuf);
                return NULL;
  
  void etnaviv_gpu_cmdbuf_free(struct etnaviv_cmdbuf *cmdbuf)
  {
 -      dma_free_writecombine(cmdbuf->gpu->dev, cmdbuf->size,
 -                            cmdbuf->vaddr, cmdbuf->paddr);
 +      dma_free_wc(cmdbuf->gpu->dev, cmdbuf->size, cmdbuf->vaddr,
 +                  cmdbuf->paddr);
        kfree(cmdbuf);
  }
  
@@@ -1150,14 -1147,23 +1147,23 @@@ static void retire_worker(struct work_s
                fence_put(cmdbuf->fence);
  
                for (i = 0; i < cmdbuf->nr_bos; i++) {
-                       struct etnaviv_gem_object *etnaviv_obj = cmdbuf->bo[i];
+                       struct etnaviv_vram_mapping *mapping = cmdbuf->bo_map[i];
+                       struct etnaviv_gem_object *etnaviv_obj = mapping->object;
  
                        atomic_dec(&etnaviv_obj->gpu_active);
                        /* drop the refcount taken in etnaviv_gpu_submit */
-                       etnaviv_gem_put_iova(gpu, &etnaviv_obj->base);
+                       etnaviv_gem_mapping_unreference(mapping);
                }
  
                etnaviv_gpu_cmdbuf_free(cmdbuf);
+               /*
+                * We need to balance the runtime PM count caused by
+                * each submission.  Upon submission, we increment
+                * the runtime PM counter, and allocate one event.
+                * So here, we put the runtime PM count for each
+                * completed event.
+                */
+               pm_runtime_put_autosuspend(gpu->dev);
        }
  
        gpu->retired_fence = fence;
@@@ -1304,11 -1310,10 +1310,10 @@@ int etnaviv_gpu_submit(struct etnaviv_g
  
        for (i = 0; i < submit->nr_bos; i++) {
                struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
-               u32 iova;
  
-               /* Each cmdbuf takes a refcount on the iova */
-               etnaviv_gem_get_iova(gpu, &etnaviv_obj->base, &iova);
-               cmdbuf->bo[i] = etnaviv_obj;
+               /* Each cmdbuf takes a refcount on the mapping */
+               etnaviv_gem_mapping_reference(submit->bos[i].mapping);
+               cmdbuf->bo_map[i] = submit->bos[i].mapping;
                atomic_inc(&etnaviv_obj->gpu_active);
  
                if (submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE)
@@@ -1378,15 -1383,6 +1383,6 @@@ static irqreturn_t irq_handler(int irq
                                gpu->completed_fence = fence->seqno;
  
                        event_free(gpu, event);
-                       /*
-                        * We need to balance the runtime PM count caused by
-                        * each submission.  Upon submission, we increment
-                        * the runtime PM counter, and allocate one event.
-                        * So here, we put the runtime PM count for each
-                        * completed event.
-                        */
-                       pm_runtime_put_autosuspend(gpu->dev);
                }
  
                /* Retire the buffer objects in a work */
@@@ -1481,6 -1477,7 +1477,7 @@@ static int etnaviv_gpu_hw_resume(struc
        etnaviv_gpu_hw_init(gpu);
  
        gpu->switch_context = true;
+       gpu->exec_state = -1;
  
        mutex_unlock(&gpu->lock);
  
@@@ -1569,6 -1566,7 +1566,7 @@@ static int etnaviv_gpu_platform_probe(s
  {
        struct device *dev = &pdev->dev;
        struct etnaviv_gpu *gpu;
+       u32 dma_mask;
        int err = 0;
  
        gpu = devm_kzalloc(dev, sizeof(*gpu), GFP_KERNEL);
        mutex_init(&gpu->lock);
  
        /*
-        * Set the GPU base address to the start of physical memory.  This
-        * ensures that if we have up to 2GB, the v1 MMU can address the
-        * highest memory.  This is important as command buffers may be
-        * allocated outside of this limit.
+        * Set the GPU linear window to be at the end of the DMA window, where
+        * the CMA area is likely to reside. This ensures that we are able to
+        * map the command buffers while having the linear window overlap as
+        * much RAM as possible, so we can optimize mappings for other buffers.
         */
-       gpu->memory_base = PHYS_OFFSET;
+       dma_mask = (u32)dma_get_required_mask(dev);
+       if (dma_mask < PHYS_OFFSET + SZ_2G)
+               gpu->memory_base = PHYS_OFFSET;
+       else
+               gpu->memory_base = dma_mask - SZ_2G + 1;
  
        /* Map registers: */
        gpu->mmio = etnaviv_ioremap(pdev, NULL, dev_name(gpu->dev));
index 90dbf8121210934d16338bfe48466c1e4aacdaae,4b09c840d493c03fb6eabfb0cfdd370c7da3cc78..6be40f3ba2c79ea08b7b144b6a0de907767ddf93
@@@ -49,21 -49,18 +49,18 @@@ struct i915_mmu_notifier 
        struct hlist_node node;
        struct mmu_notifier mn;
        struct rb_root objects;
-       struct list_head linear;
-       bool has_linear;
  };
  
  struct i915_mmu_object {
        struct i915_mmu_notifier *mn;
+       struct drm_i915_gem_object *obj;
        struct interval_tree_node it;
        struct list_head link;
-       struct drm_i915_gem_object *obj;
        struct work_struct work;
-       bool active;
-       bool is_linear;
+       bool attached;
  };
  
- static void __cancel_userptr__worker(struct work_struct *work)
+ static void cancel_userptr(struct work_struct *work)
  {
        struct i915_mmu_object *mo = container_of(work, typeof(*mo), work);
        struct drm_i915_gem_object *obj = mo->obj;
@@@ -81,7 -78,7 +78,7 @@@
                was_interruptible = dev_priv->mm.interruptible;
                dev_priv->mm.interruptible = false;
  
-               list_for_each_entry_safe(vma, tmp, &obj->vma_list, vma_link) {
+               list_for_each_entry_safe(vma, tmp, &obj->vma_list, obj_link) {
                        int ret = i915_vma_unbind(vma);
                        WARN_ON(ret && ret != -EIO);
                }
        mutex_unlock(&dev->struct_mutex);
  }
  
- static unsigned long cancel_userptr(struct i915_mmu_object *mo)
+ static void add_object(struct i915_mmu_object *mo)
  {
-       unsigned long end = mo->obj->userptr.ptr + mo->obj->base.size;
-       /* The mmu_object is released late when destroying the
-        * GEM object so it is entirely possible to gain a
-        * reference on an object in the process of being freed
-        * since our serialisation is via the spinlock and not
-        * the struct_mutex - and consequently use it after it
-        * is freed and then double free it.
-        */
-       if (mo->active && kref_get_unless_zero(&mo->obj->base.refcount)) {
-               schedule_work(&mo->work);
-               /* only schedule one work packet to avoid the refleak */
-               mo->active = false;
-       }
+       if (mo->attached)
+               return;
  
-       return end;
+       interval_tree_insert(&mo->it, &mo->mn->objects);
+       mo->attached = true;
+ }
+ static void del_object(struct i915_mmu_object *mo)
+ {
+       if (!mo->attached)
+               return;
+       interval_tree_remove(&mo->it, &mo->mn->objects);
+       mo->attached = false;
  }
  
  static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
        struct i915_mmu_notifier *mn =
                container_of(_mn, struct i915_mmu_notifier, mn);
        struct i915_mmu_object *mo;
+       struct interval_tree_node *it;
+       LIST_HEAD(cancelled);
+       if (RB_EMPTY_ROOT(&mn->objects))
+               return;
  
        /* interval ranges are inclusive, but invalidate range is exclusive */
        end--;
  
        spin_lock(&mn->lock);
-       if (mn->has_linear) {
-               list_for_each_entry(mo, &mn->linear, link) {
-                       if (mo->it.last < start || mo->it.start > end)
-                               continue;
-                       cancel_userptr(mo);
-               }
-       } else {
-               struct interval_tree_node *it;
+       it = interval_tree_iter_first(&mn->objects, start, end);
+       while (it) {
+               /* The mmu_object is released late when destroying the
+                * GEM object so it is entirely possible to gain a
+                * reference on an object in the process of being freed
+                * since our serialisation is via the spinlock and not
+                * the struct_mutex - and consequently use it after it
+                * is freed and then double free it. To prevent that
+                * use-after-free we only acquire a reference on the
+                * object if it is not in the process of being destroyed.
+                */
+               mo = container_of(it, struct i915_mmu_object, it);
+               if (kref_get_unless_zero(&mo->obj->base.refcount))
+                       schedule_work(&mo->work);
  
-               it = interval_tree_iter_first(&mn->objects, start, end);
-               while (it) {
-                       mo = container_of(it, struct i915_mmu_object, it);
-                       start = cancel_userptr(mo);
-                       it = interval_tree_iter_next(it, start, end);
-               }
+               list_add(&mo->link, &cancelled);
+               it = interval_tree_iter_next(it, start, end);
        }
+       list_for_each_entry(mo, &cancelled, link)
+               del_object(mo);
        spin_unlock(&mn->lock);
  }
  
@@@ -164,8 -167,6 +167,6 @@@ i915_mmu_notifier_create(struct mm_stru
        spin_lock_init(&mn->lock);
        mn->mn.ops = &i915_gem_userptr_notifier;
        mn->objects = RB_ROOT;
-       INIT_LIST_HEAD(&mn->linear);
-       mn->has_linear = false;
  
         /* Protected by mmap_sem (write-lock) */
        ret = __mmu_notifier_register(&mn->mn, mm);
        return mn;
  }
  
- static int
- i915_mmu_notifier_add(struct drm_device *dev,
-                     struct i915_mmu_notifier *mn,
-                     struct i915_mmu_object *mo)
- {
-       struct interval_tree_node *it;
-       int ret = 0;
-       /* By this point we have already done a lot of expensive setup that
-        * we do not want to repeat just because the caller (e.g. X) has a
-        * signal pending (and partly because of that expensive setup, X
-        * using an interrupt timer is likely to get stuck in an EINTR loop).
-        */
-       mutex_lock(&dev->struct_mutex);
-       /* Make sure we drop the final active reference (and thereby
-        * remove the objects from the interval tree) before we do
-        * the check for overlapping objects.
-        */
-       i915_gem_retire_requests(dev);
-       spin_lock(&mn->lock);
-       it = interval_tree_iter_first(&mn->objects,
-                                     mo->it.start, mo->it.last);
-       if (it) {
-               struct drm_i915_gem_object *obj;
-               /* We only need to check the first object in the range as it
-                * either has cancelled gup work queued and we need to
-                * return back to the user to give time for the gup-workers
-                * to flush their object references upon which the object will
-                * be removed from the interval-tree, or the the range is
-                * still in use by another client and the overlap is invalid.
-                *
-                * If we do have an overlap, we cannot use the interval tree
-                * for fast range invalidation.
-                */
-               obj = container_of(it, struct i915_mmu_object, it)->obj;
-               if (!obj->userptr.workers)
-                       mn->has_linear = mo->is_linear = true;
-               else
-                       ret = -EAGAIN;
-       } else
-               interval_tree_insert(&mo->it, &mn->objects);
-       if (ret == 0)
-               list_add(&mo->link, &mn->linear);
-       spin_unlock(&mn->lock);
-       mutex_unlock(&dev->struct_mutex);
-       return ret;
- }
- static bool i915_mmu_notifier_has_linear(struct i915_mmu_notifier *mn)
- {
-       struct i915_mmu_object *mo;
-       list_for_each_entry(mo, &mn->linear, link)
-               if (mo->is_linear)
-                       return true;
-       return false;
- }
- static void
- i915_mmu_notifier_del(struct i915_mmu_notifier *mn,
-                     struct i915_mmu_object *mo)
- {
-       spin_lock(&mn->lock);
-       list_del(&mo->link);
-       if (mo->is_linear)
-               mn->has_linear = i915_mmu_notifier_has_linear(mn);
-       else
-               interval_tree_remove(&mo->it, &mn->objects);
-       spin_unlock(&mn->lock);
- }
  static void
  i915_gem_userptr_release__mmu_notifier(struct drm_i915_gem_object *obj)
  {
        if (mo == NULL)
                return;
  
-       i915_mmu_notifier_del(mo->mn, mo);
+       spin_lock(&mo->mn->lock);
+       del_object(mo);
+       spin_unlock(&mo->mn->lock);
        kfree(mo);
  
        obj->userptr.mmu_object = NULL;
@@@ -299,7 -223,6 +223,6 @@@ i915_gem_userptr_init__mmu_notifier(str
  {
        struct i915_mmu_notifier *mn;
        struct i915_mmu_object *mo;
-       int ret;
  
        if (flags & I915_USERPTR_UNSYNCHRONIZED)
                return capable(CAP_SYS_ADMIN) ? 0 : -EPERM;
                return -ENOMEM;
  
        mo->mn = mn;
-       mo->it.start = obj->userptr.ptr;
-       mo->it.last = mo->it.start + obj->base.size - 1;
        mo->obj = obj;
-       INIT_WORK(&mo->work, __cancel_userptr__worker);
-       ret = i915_mmu_notifier_add(obj->base.dev, mn, mo);
-       if (ret) {
-               kfree(mo);
-               return ret;
-       }
+       mo->it.start = obj->userptr.ptr;
+       mo->it.last = obj->userptr.ptr + obj->base.size - 1;
+       INIT_WORK(&mo->work, cancel_userptr);
  
        obj->userptr.mmu_object = mo;
        return 0;
@@@ -552,8 -469,10 +469,10 @@@ __i915_gem_userptr_set_active(struct dr
        /* In order to serialise get_pages with an outstanding
         * cancel_userptr, we must drop the struct_mutex and try again.
         */
-       if (!value || !work_pending(&obj->userptr.mmu_object->work))
-               obj->userptr.mmu_object->active = value;
+       if (!value)
+               del_object(obj->userptr.mmu_object);
+       else if (!work_pending(&obj->userptr.mmu_object->work))
+               add_object(obj->userptr.mmu_object);
        else
                ret = -EAGAIN;
        spin_unlock(&obj->userptr.mmu_object->mn->lock);
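
The three-way update above (del on deactivate, add when no cancel work is pending, -EAGAIN otherwise) pushes the retry onto the caller, which must drop struct_mutex so the pending cancel_userptr work can run. A hedged sketch of that caller-side loop; the exact signature and surrounding locking are assumptions, since the caller is not part of this hunk:

        int ret;

        /* hypothetical retry loop for the -EAGAIN case above */
        while ((ret = __i915_gem_userptr_set_active(obj, true)) == -EAGAIN) {
                mutex_unlock(&dev->struct_mutex);
                flush_work(&obj->userptr.mmu_object->work);     /* let cancel finish */
                mutex_lock(&dev->struct_mutex);
        }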
@@@ -584,11 -503,11 +503,11 @@@ __i915_gem_userptr_get_pages_worker(str
  
                down_read(&mm->mmap_sem);
                while (pinned < npages) {
 -                      ret = get_user_pages(work->task, mm,
 -                                           obj->userptr.ptr + pinned * PAGE_SIZE,
 -                                           npages - pinned,
 -                                           !obj->userptr.read_only, 0,
 -                                           pvec + pinned, NULL);
 +                      ret = get_user_pages_remote(work->task, mm,
 +                                      obj->userptr.ptr + pinned * PAGE_SIZE,
 +                                      npages - pinned,
 +                                      !obj->userptr.read_only, 0,
 +                                      pvec + pinned, NULL);
                        if (ret < 0)
                                break;
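
(For context on the hunk above: get_user_pages_remote() was introduced in this same 4.6 cycle as the variant of get_user_pages() that takes an explicit task/mm pair, which is exactly the situation here since the worker operates on the userptr owner's mm rather than current->mm; the semantics are otherwise unchanged.)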
  
index 85dfe3674b4138ef02e1b3f043699459d9decb25,9f94576c435d3e7a06435001fcb9a817cee37dfc..de275a5be1dbfd4013a3c042590d804ed46b53fa
@@@ -79,6 -79,16 +79,16 @@@ static const uint32_t reg[][4] = 
                        DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
  };
  
+ static u32 dmm_read(struct dmm *dmm, u32 reg)
+ {
+       return readl(dmm->base + reg);
+ }
+ static void dmm_write(struct dmm *dmm, u32 val, u32 reg)
+ {
+       writel(val, dmm->base + reg);
+ }
  /* simple allocator to grab the next 16-byte aligned chunk of memory from the txn */
  static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
  {
@@@ -108,7 -118,7 +118,7 @@@ static int wait_status(struct refill_en
  
        i = DMM_FIXED_RETRY_COUNT;
        while (true) {
-               r = readl(dmm->base + reg[PAT_STATUS][engine->id]);
+               r = dmm_read(dmm, reg[PAT_STATUS][engine->id]);
                err = r & DMM_PATSTATUS_ERR;
                if (err)
                        return -EFAULT;
@@@ -140,11 -150,11 +150,11 @@@ static void release_engine(struct refil
  static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
  {
        struct dmm *dmm = arg;
-       uint32_t status = readl(dmm->base + DMM_PAT_IRQSTATUS);
+       uint32_t status = dmm_read(dmm, DMM_PAT_IRQSTATUS);
        int i;
  
        /* ack IRQ */
-       writel(status, dmm->base + DMM_PAT_IRQSTATUS);
+       dmm_write(dmm, status, DMM_PAT_IRQSTATUS);
  
        for (i = 0; i < dmm->num_engines; i++) {
                if (status & DMM_IRQSTAT_LST) {
@@@ -264,7 -274,7 +274,7 @@@ static int dmm_txn_commit(struct dmm_tx
        txn->last_pat->next_pa = 0;
  
        /* write to PAT_DESCR to clear out any pending transaction */
-       writel(0x0, dmm->base + reg[PAT_DESCR][engine->id]);
+       dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]);
  
        /* wait for engine ready: */
        ret = wait_status(engine, DMM_PATSTATUS_READY);
        smp_mb();
  
        /* kick reload */
-       writel(engine->refill_pa,
-               dmm->base + reg[PAT_DESCR][engine->id]);
+       dmm_write(dmm, engine->refill_pa, reg[PAT_DESCR][engine->id]);
  
        if (wait) {
                if (!wait_for_completion_timeout(&engine->compl,
@@@ -309,6 -318,21 +318,21 @@@ static int fill(struct tcm_area *area, 
        struct tcm_area slice, area_s;
        struct dmm_txn *txn;
  
+       /*
+        * FIXME
+        *
+        * Asynchronous fill does not work reliably, as the driver does not
+        * handle errors in the async code paths. The fill operation may
+        * silently fail, leading to leaking DMM engines, which may eventually
+        * lead to deadlock if we run out of DMM engines.
+        *
+        * For now, always set 'wait' so that we only use sync fills. Async
+        * fills should be fixed, or alternatively we could decide to support
+        * only sync fills and remove the whole async code path.
+        */
+       wait = true;
        txn = dmm_txn_init(omap_dmm, area->tcm);
        if (IS_ERR_OR_NULL(txn))
                return -ENOMEM;
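
To make the leak arithmetic in the FIXME concrete: the interrupt handler hunk above keys completion off DMM_IRQSTAT_LST, so an async fill that fails without raising LST never returns its engine, and once engine_counter has been drained by num_engines such failures every subsequent fill blocks forever. One hypothetical shape for a fix is to latch the error and hand the engine back from the IRQ path; DMM_IRQSTAT_ERR, engine->async and engine->async_err are assumed names here, not existing driver API:

        /* inside the omap_dmm_irq_handler() per-engine loop (sketch only) */
        struct refill_engine *engine = &dmm->engines[i];

        if (status & DMM_IRQSTAT_ERR) {
                engine->async_err = -EFAULT;    /* assumed field: latch the error */
                if (engine->async)              /* assumed field: no waiter exists */
                        release_engine(engine); /* return the engine, avoid the leak */
        }
        if (status & DMM_IRQSTAT_LST)
                complete(&engine->compl);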
@@@ -573,9 -597,10 +597,9 @@@ static int omap_dmm_remove(struct platf
  
                kfree(omap_dmm->engines);
                if (omap_dmm->refill_va)
 -                      dma_free_writecombine(omap_dmm->dev,
 -                              REFILL_BUFFER_SIZE * omap_dmm->num_engines,
 -                              omap_dmm->refill_va,
 -                              omap_dmm->refill_pa);
 +                      dma_free_wc(omap_dmm->dev,
 +                                  REFILL_BUFFER_SIZE * omap_dmm->num_engines,
 +                                  omap_dmm->refill_va, omap_dmm->refill_pa);
                if (omap_dmm->dummy_page)
                        __free_page(omap_dmm->dummy_page);
  
@@@ -641,7 -666,7 +665,7 @@@ static int omap_dmm_probe(struct platfo
  
        omap_dmm->dev = &dev->dev;
  
-       hwinfo = readl(omap_dmm->base + DMM_PAT_HWINFO);
+       hwinfo = dmm_read(omap_dmm, DMM_PAT_HWINFO);
        omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
        omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
        omap_dmm->container_width = 256;
        atomic_set(&omap_dmm->engine_counter, omap_dmm->num_engines);
  
        /* read out actual LUT width and height */
-       pat_geom = readl(omap_dmm->base + DMM_PAT_GEOMETRY);
+       pat_geom = dmm_read(omap_dmm, DMM_PAT_GEOMETRY);
        omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
        omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5;
  
                omap_dmm->num_lut++;
  
        /* initialize DMM registers */
-       writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__0);
-       writel(0x88888888, omap_dmm->base + DMM_PAT_VIEW__1);
-       writel(0x80808080, omap_dmm->base + DMM_PAT_VIEW_MAP__0);
-       writel(0x80000000, omap_dmm->base + DMM_PAT_VIEW_MAP_BASE);
-       writel(0x88888888, omap_dmm->base + DMM_TILER_OR__0);
-       writel(0x88888888, omap_dmm->base + DMM_TILER_OR__1);
+       dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__0);
+       dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__1);
+       dmm_write(omap_dmm, 0x80808080, DMM_PAT_VIEW_MAP__0);
+       dmm_write(omap_dmm, 0x80000000, DMM_PAT_VIEW_MAP_BASE);
+       dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__0);
+       dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__1);
  
        ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
                                "omap_dmm_irq_handler", omap_dmm);
         * buffers for accelerated pan/scroll) and FILL_DSC<n> which
         * we just generally don't care about.
         */
-       writel(0x7e7e7e7e, omap_dmm->base + DMM_PAT_IRQENABLE_SET);
+       dmm_write(omap_dmm, 0x7e7e7e7e, DMM_PAT_IRQENABLE_SET);
  
        omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
        if (!omap_dmm->dummy_page) {
        omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);
  
        /* alloc refill memory */
 -      omap_dmm->refill_va = dma_alloc_writecombine(&dev->dev,
 -                              REFILL_BUFFER_SIZE * omap_dmm->num_engines,
 -                              &omap_dmm->refill_pa, GFP_KERNEL);
 +      omap_dmm->refill_va = dma_alloc_wc(&dev->dev,
 +                                         REFILL_BUFFER_SIZE * omap_dmm->num_engines,
 +                                         &omap_dmm->refill_pa, GFP_KERNEL);
        if (!omap_dmm->refill_va) {
                dev_err(&dev->dev, "could not allocate refill memory\n");
                goto fail;
index 359b0d7e8ef78faf6cd9d6e044cfb5088f99f0be,cc36a8dc9bd4c1b47bff9a73042ebdd448111624..907154f5b67ce07ab6c7c2486a8149d1ba8f8e34
@@@ -31,9 -31,9 +31,9 @@@
   */
  
  /* note: we use upper 8 bits of flags for driver-internal flags: */
- #define OMAP_BO_DMA           0x01000000      /* actually is physically contiguous */
- #define OMAP_BO_EXT_SYNC      0x02000000      /* externally allocated sync object */
- #define OMAP_BO_EXT_MEM               0x04000000      /* externally allocated memory */
+ #define OMAP_BO_MEM_DMA_API   0x01000000      /* memory allocated with the dma_alloc_* API */
+ #define OMAP_BO_MEM_SHMEM     0x02000000      /* memory allocated through shmem backing */
+ #define OMAP_BO_MEM_DMABUF    0x08000000      /* memory imported from a dmabuf */
  
  struct omap_gem_object {
        struct drm_gem_object base;
        uint32_t roll;
  
        /**
-        * If buffer is allocated physically contiguous, the OMAP_BO_DMA flag
-        * is set and the paddr is valid.  Also if the buffer is remapped in
-        * TILER and paddr_cnt > 0, then paddr is valid.  But if you are using
-        * the physical address and OMAP_BO_DMA is not set, then you should
-        * be going thru omap_gem_{get,put}_paddr() to ensure the mapping is
-        * not removed from under your feet.
+        * paddr contains the buffer DMA address. It is valid for
         *
-        * Note that OMAP_BO_SCANOUT is a hint from userspace that DMA capable
-        * buffer is requested, but doesn't mean that it is.  Use the
-        * OMAP_BO_DMA flag to determine if the buffer has a DMA capable
-        * physical address.
+        * - buffers allocated through the DMA mapping API (with the
+        *   OMAP_BO_MEM_DMA_API flag set)
+        *
+        * - buffers imported from dmabuf (with the OMAP_BO_MEM_DMABUF flag set)
+        *   if they are physically contiguous (when sgt->orig_nents == 1)
+        *
+        * - buffers mapped through the TILER when paddr_cnt is not zero, in
+        *   which case the DMA address points to the TILER aperture
+        *
+        * Physically contiguous buffers have their DMA address equal to the
+        * physical address as we don't remap those buffers through the TILER.
+        *
+        * Buffers mapped to the TILER have their DMA address pointing to the
+        * TILER aperture. As TILER mappings are refcounted (through paddr_cnt)
+        * the DMA address must be accessed through omap_gem_get_paddr() to
+        * ensure that the mapping won't disappear unexpectedly. References must
+        * be released with omap_gem_put_paddr().
         */
        dma_addr_t paddr;
  
         */
        uint32_t paddr_cnt;
  
+       /**
+        * If the buffer has been imported from a dmabuf, the OMAP_BO_MEM_DMABUF flag
+        * is set and the sgt field is valid.
+        */
+       struct sg_table *sgt;
        /**
         * tiler block used when buffer is remapped in DMM/TILER.
         */
         * sync-object allocated on demand (if needed)
         *
         * Per-buffer sync-object for tracking pending and completed hw/dma
-        * read and write operations.  The layout in memory is dictated by
-        * the SGX firmware, which uses this information to stall the command
-        * stream if a surface is not ready yet.
-        *
-        * Note that when buffer is used by SGX, the sync-object needs to be
-        * allocated from a special heap of sync-objects.  This way many sync
-        * objects can be packed in a page, and not waste GPU virtual address
-        * space.  Because of this we have to have a omap_gem_set_sync_object()
-        * API to allow replacement of the syncobj after it has (potentially)
-        * already been allocated.  A bit ugly but I haven't thought of a
-        * better alternative.
+        * read and write operations.
         */
        struct {
                uint32_t write_pending;
@@@ -166,16 -170,15 +170,15 @@@ static uint64_t mmap_offset(struct drm_
        return drm_vma_node_offset_addr(&obj->vma_node);
  }
  
- /* GEM objects can either be allocated from contiguous memory (in which
-  * case obj->filp==NULL), or w/ shmem backing (obj->filp!=NULL).  But non
-  * contiguous buffers can be remapped in TILER/DMM if they need to be
-  * contiguous... but we don't do this all the time to reduce pressure
-  * on TILER/DMM space when we know at allocation time that the buffer
-  * will need to be scanned out.
-  */
- static inline bool is_shmem(struct drm_gem_object *obj)
+ static bool is_contiguous(struct omap_gem_object *omap_obj)
  {
-       return obj->filp != NULL;
+       if (omap_obj->flags & OMAP_BO_MEM_DMA_API)
+               return true;
+       if ((omap_obj->flags & OMAP_BO_MEM_DMABUF) && omap_obj->sgt->orig_nents == 1)
+               return true;
+       return false;
  }
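
The paddr rules above reduce to a pin/use/unpin protocol around omap_gem_get_paddr() and omap_gem_put_paddr(). A minimal usage sketch, assuming the (obj, &paddr, remap) signature implied by the hunk further below; program_scanout_hw() is a hypothetical consumer:

        static void program_scanout_hw(dma_addr_t paddr);       /* hypothetical */

        static int scanout_one_frame(struct drm_gem_object *obj)
        {
                dma_addr_t paddr;
                int ret;

                /* pin: remaps through the TILER if the buffer is not contiguous */
                ret = omap_gem_get_paddr(obj, &paddr, true);
                if (ret)
                        return ret;

                program_scanout_hw(paddr);

                /* unpin: the TILER mapping may go away once the count hits zero */
                omap_gem_put_paddr(obj);
                return 0;
        }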
  
  /* -----------------------------------------------------------------------------
@@@ -264,6 -267,19 +267,19 @@@ static int omap_gem_attach_pages(struc
                for (i = 0; i < npages; i++) {
                        addrs[i] = dma_map_page(dev->dev, pages[i],
                                        0, PAGE_SIZE, DMA_BIDIRECTIONAL);
+                       if (dma_mapping_error(dev->dev, addrs[i])) {
+                               dev_warn(dev->dev,
+                                       "%s: failed to map page\n", __func__);
+                               for (i = i - 1; i >= 0; --i) {
+                                       dma_unmap_page(dev->dev, addrs[i],
+                                               PAGE_SIZE, DMA_BIDIRECTIONAL);
+                               }
+                               ret = -ENOMEM;
+                               goto free_addrs;
+                       }
                }
        } else {
                addrs = kzalloc(npages * sizeof(*addrs), GFP_KERNEL);
  
        return 0;
  
+ free_addrs:
+       kfree(addrs);
  free_pages:
        drm_gem_put_pages(obj, pages, true, false);
  
@@@ -292,7 -310,7 +310,7 @@@ static int get_pages(struct drm_gem_obj
        struct omap_gem_object *omap_obj = to_omap_bo(obj);
        int ret = 0;
  
-       if (is_shmem(obj) && !omap_obj->pages) {
+       if ((omap_obj->flags & OMAP_BO_MEM_SHMEM) && !omap_obj->pages) {
                ret = omap_gem_attach_pages(obj);
                if (ret) {
                        dev_err(obj->dev->dev, "could not attach pages\n");
@@@ -396,7 -414,7 +414,7 @@@ static int fault_1d(struct drm_gem_obje
                omap_gem_cpu_sync(obj, pgoff);
                pfn = page_to_pfn(omap_obj->pages[pgoff]);
        } else {
-               BUG_ON(!(omap_obj->flags & OMAP_BO_DMA));
+               BUG_ON(!is_contiguous(omap_obj));
                pfn = (omap_obj->paddr >> PAGE_SHIFT) + pgoff;
        }
  
@@@ -560,6 -578,11 +578,11 @@@ fail
        case 0:
        case -ERESTARTSYS:
        case -EINTR:
+       case -EBUSY:
+               /*
+                * EBUSY is ok: this just means that another thread
+                * already did the job.
+                */
                return VM_FAULT_NOPAGE;
        case -ENOMEM:
                return VM_FAULT_OOM;
@@@ -728,7 -751,8 +751,8 @@@ fail
  static inline bool is_cached_coherent(struct drm_gem_object *obj)
  {
        struct omap_gem_object *omap_obj = to_omap_bo(obj);
-       return is_shmem(obj) &&
+       return (omap_obj->flags & OMAP_BO_MEM_SHMEM) &&
                ((omap_obj->flags & OMAP_BO_CACHE_MASK) == OMAP_BO_CACHED);
  }
  
@@@ -761,9 -785,20 +785,20 @@@ void omap_gem_dma_sync(struct drm_gem_o
  
                for (i = 0; i < npages; i++) {
                        if (!omap_obj->addrs[i]) {
-                               omap_obj->addrs[i] = dma_map_page(dev->dev, pages[i], 0,
+                               dma_addr_t addr;
+                               addr = dma_map_page(dev->dev, pages[i], 0,
                                                PAGE_SIZE, DMA_BIDIRECTIONAL);
+                               if (dma_mapping_error(dev->dev, addr)) {
+                                       dev_warn(dev->dev,
+                                               "%s: failed to map page\n",
+                                               __func__);
+                                       break;
+                               }
                                dirty = true;
+                               omap_obj->addrs[i] = addr;
                        }
                }
  
@@@ -787,7 -822,7 +822,7 @@@ int omap_gem_get_paddr(struct drm_gem_o
  
        mutex_lock(&obj->dev->struct_mutex);
  
-       if (remap && is_shmem(obj) && priv->has_dmm) {
+       if (!is_contiguous(omap_obj) && remap && priv->has_dmm) {
                if (omap_obj->paddr_cnt == 0) {
                        struct page **pages;
                        uint32_t npages = obj->size >> PAGE_SHIFT;
                omap_obj->paddr_cnt++;
  
                *paddr = omap_obj->paddr;
-       } else if (omap_obj->flags & OMAP_BO_DMA) {
+       } else if (is_contiguous(omap_obj)) {
                *paddr = omap_obj->paddr;
        } else {
                ret = -EINVAL;
@@@ -1138,20 -1173,6 +1173,6 @@@ unlock
        return ret;
  }
  
- /* it is a bit lame to handle updates in this sort of polling way, but
-  * in case of PVR, the GPU can directly update read/write complete
-  * values, and not really tell us which ones it updated.. this also
-  * means that sync_lock is not quite sufficient.  So we'll need to
-  * do something a bit better when it comes time to add support for
-  * separate 2d hw..
-  */
- void omap_gem_op_update(void)
- {
-       spin_lock(&sync_lock);
-       sync_op_update();
-       spin_unlock(&sync_lock);
- }
  /* mark the start of read and/or write operation */
  int omap_gem_op_start(struct drm_gem_object *obj, enum omap_gem_op op)
  {
@@@ -1219,7 -1240,7 +1240,7 @@@ int omap_gem_op_sync(struct drm_gem_obj
   * is currently blocked..  fxn() can be called from any context
   *
   * (TODO for now fxn is called back from whichever context calls
-  * omap_gem_op_update().. but this could be better defined later
+  * omap_gem_op_finish().. but this could be better defined later
   * if needed)
   *
   * TODO more code in common w/ _sync()..
@@@ -1261,50 -1282,10 +1282,10 @@@ int omap_gem_op_async(struct drm_gem_ob
        return 0;
  }
  
- /* special API so PVR can update the buffer to use a sync-object allocated
-  * from it's sync-obj heap.  Only used for a newly allocated (from PVR's
-  * perspective) sync-object, so we overwrite the new syncobj w/ values
-  * from the already allocated syncobj (if there is one)
-  */
- int omap_gem_set_sync_object(struct drm_gem_object *obj, void *syncobj)
- {
-       struct omap_gem_object *omap_obj = to_omap_bo(obj);
-       int ret = 0;
-       spin_lock(&sync_lock);
-       if ((omap_obj->flags & OMAP_BO_EXT_SYNC) && !syncobj) {
-               /* clearing a previously set syncobj */
-               syncobj = kmemdup(omap_obj->sync, sizeof(*omap_obj->sync),
-                                 GFP_ATOMIC);
-               if (!syncobj) {
-                       ret = -ENOMEM;
-                       goto unlock;
-               }
-               omap_obj->flags &= ~OMAP_BO_EXT_SYNC;
-               omap_obj->sync = syncobj;
-       } else if (syncobj && !(omap_obj->flags & OMAP_BO_EXT_SYNC)) {
-               /* replacing an existing syncobj */
-               if (omap_obj->sync) {
-                       memcpy(syncobj, omap_obj->sync, sizeof(*omap_obj->sync));
-                       kfree(omap_obj->sync);
-               }
-               omap_obj->flags |= OMAP_BO_EXT_SYNC;
-               omap_obj->sync = syncobj;
-       }
- unlock:
-       spin_unlock(&sync_lock);
-       return ret;
- }
  /* -----------------------------------------------------------------------------
   * Constructor & Destructor
   */
  
- /* don't call directly.. called from GEM core when it is time to actually
-  * free the object..
-  */
  void omap_gem_free_object(struct drm_gem_object *obj)
  {
        struct drm_device *dev = obj->dev;
         */
        WARN_ON(omap_obj->paddr_cnt > 0);
  
-       /* don't free externally allocated backing memory */
-       if (!(omap_obj->flags & OMAP_BO_EXT_MEM)) {
-               if (omap_obj->pages)
+       if (omap_obj->pages) {
+               if (omap_obj->flags & OMAP_BO_MEM_DMABUF)
+                       kfree(omap_obj->pages);
+               else
                        omap_gem_detach_pages(obj);
+       }
  
-               if (!is_shmem(obj)) {
-                       dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
-                                   omap_obj->paddr);
-               } else if (omap_obj->vaddr) {
-                       vunmap(omap_obj->vaddr);
-               }
+       if (omap_obj->flags & OMAP_BO_MEM_DMA_API) {
 -              dma_free_writecombine(dev->dev, obj->size,
 -                              omap_obj->vaddr, omap_obj->paddr);
++              dma_free_wc(dev->dev, obj->size, omap_obj->vaddr,
++                          omap_obj->paddr);
+       } else if (omap_obj->vaddr) {
+               vunmap(omap_obj->vaddr);
+       } else if (obj->import_attach) {
+               drm_prime_gem_destroy(obj, omap_obj->sgt);
        }
  
-       /* don't free externally allocated syncobj */
-       if (!(omap_obj->flags & OMAP_BO_EXT_SYNC))
-               kfree(omap_obj->sync);
+       kfree(omap_obj->sync);
  
        drm_gem_object_release(obj);
  
@@@ -1357,84 -1339,160 +1339,160 @@@ struct drm_gem_object *omap_gem_new(str
        size_t size;
        int ret;
  
+       /* Validate the flags and compute the memory and cache flags. */
        if (flags & OMAP_BO_TILED) {
                if (!priv->usergart) {
                        dev_err(dev->dev, "Tiled buffers require DMM\n");
                        return NULL;
                }
  
-               /* tiled buffers are always shmem paged backed.. when they are
-                * scanned out, they are remapped into DMM/TILER
+               /*
+                * Tiled buffers are always shmem paged backed. When they are
+                * scanned out, they are remapped into DMM/TILER.
                 */
                flags &= ~OMAP_BO_SCANOUT;
+               flags |= OMAP_BO_MEM_SHMEM;
  
-               /* currently don't allow cached buffers.. there is some caching
-                * stuff that needs to be handled better
+               /*
+                * Currently don't allow cached buffers. There is some caching
+                * stuff that needs to be handled better.
                 */
                flags &= ~(OMAP_BO_CACHED|OMAP_BO_WC|OMAP_BO_UNCACHED);
                flags |= tiler_get_cpu_cache_flags();
-               /* align dimensions to slot boundaries... */
-               tiler_align(gem2fmt(flags),
-                               &gsize.tiled.width, &gsize.tiled.height);
-               /* ...and calculate size based on aligned dimensions */
-               size = tiler_size(gem2fmt(flags),
-                               gsize.tiled.width, gsize.tiled.height);
-       } else {
-               size = PAGE_ALIGN(gsize.bytes);
+       } else if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
+               /*
+                * OMAP_BO_SCANOUT hints that the buffer doesn't need to be
+                * tiled. However, to lower the pressure on memory allocation,
+                * use contiguous memory only if no TILER is available.
+                */
+               flags |= OMAP_BO_MEM_DMA_API;
+       } else if (!(flags & OMAP_BO_MEM_DMABUF)) {
+               /*
+                * All other buffers not backed by dma_buf are shmem-backed.
+                */
+               flags |= OMAP_BO_MEM_SHMEM;
        }
  
+       /* Allocate and initialize the OMAP GEM object. */
        omap_obj = kzalloc(sizeof(*omap_obj), GFP_KERNEL);
        if (!omap_obj)
                return NULL;
  
        obj = &omap_obj->base;
+       omap_obj->flags = flags;
  
-       if ((flags & OMAP_BO_SCANOUT) && !priv->has_dmm) {
-               /* attempt to allocate contiguous memory if we don't
-               * have DMM for remapping discontiguous buffers
+       if (flags & OMAP_BO_TILED) {
+               /*
+                * For tiled buffers align dimensions to slot boundaries and
+                * calculate size based on aligned dimensions.
                 */
-               omap_obj->vaddr =  dma_alloc_wc(dev->dev, size,
-                                               &omap_obj->paddr, GFP_KERNEL);
-               if (!omap_obj->vaddr) {
-                       kfree(omap_obj);
+               tiler_align(gem2fmt(flags), &gsize.tiled.width,
+                           &gsize.tiled.height);
  
-                       return NULL;
-               }
-               flags |= OMAP_BO_DMA;
-       }
+               size = tiler_size(gem2fmt(flags), gsize.tiled.width,
+                                 gsize.tiled.height);
  
-       spin_lock(&priv->list_lock);
-       list_add(&omap_obj->mm_list, &priv->obj_list);
-       spin_unlock(&priv->list_lock);
-       omap_obj->flags = flags;
-       if (flags & OMAP_BO_TILED) {
                omap_obj->width = gsize.tiled.width;
                omap_obj->height = gsize.tiled.height;
+       } else {
+               size = PAGE_ALIGN(gsize.bytes);
        }
  
-       if (flags & (OMAP_BO_DMA|OMAP_BO_EXT_MEM)) {
+       /* Initialize the GEM object. */
+       if (!(flags & OMAP_BO_MEM_SHMEM)) {
                drm_gem_private_object_init(dev, obj, size);
        } else {
                ret = drm_gem_object_init(dev, obj, size);
                if (ret)
-                       goto fail;
+                       goto err_free;
  
                mapping = file_inode(obj->filp)->i_mapping;
                mapping_set_gfp_mask(mapping, GFP_USER | __GFP_DMA32);
        }
  
 -              omap_obj->vaddr = dma_alloc_writecombine(dev->dev, size,
 -                                                       &omap_obj->paddr,
 -                                                       GFP_KERNEL);
+       /* Allocate memory if needed. */
+       if (flags & OMAP_BO_MEM_DMA_API) {
++              omap_obj->vaddr = dma_alloc_wc(dev->dev, size,
++                                             &omap_obj->paddr,
++                                             GFP_KERNEL);
+               if (!omap_obj->vaddr)
+                       goto err_release;
+       }
+       spin_lock(&priv->list_lock);
+       list_add(&omap_obj->mm_list, &priv->obj_list);
+       spin_unlock(&priv->list_lock);
        return obj;
  
- fail:
-       omap_gem_free_object(obj);
+ err_release:
+       drm_gem_object_release(obj);
+ err_free:
+       kfree(omap_obj);
        return NULL;
  }
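
As a usage sketch of the flag selection above: a caller asking for a scanout buffer ends up with DMA-API backing only when no DMM/TILER is present, and with shmem backing otherwise. The wrapper below is hypothetical; the flags and union omap_gem_size come from this file:

        static struct drm_gem_object *alloc_scanout_bo(struct drm_device *dev,
                                                       size_t bytes)
        {
                union omap_gem_size gsize = { .bytes = bytes };

                /*
                 * omap_gem_new() ORs in OMAP_BO_MEM_DMA_API (no DMM) or
                 * OMAP_BO_MEM_SHMEM (DMM present) based on priv->has_dmm.
                 */
                return omap_gem_new(dev, gsize, OMAP_BO_SCANOUT | OMAP_BO_WC);
        }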
  
+ struct drm_gem_object *omap_gem_new_dmabuf(struct drm_device *dev, size_t size,
+                                          struct sg_table *sgt)
+ {
+       struct omap_drm_private *priv = dev->dev_private;
+       struct omap_gem_object *omap_obj;
+       struct drm_gem_object *obj;
+       union omap_gem_size gsize;
+       /* Without a DMM, only physically contiguous buffers can be supported. */
+       if (sgt->orig_nents != 1 && !priv->has_dmm)
+               return ERR_PTR(-EINVAL);
+       mutex_lock(&dev->struct_mutex);
+       gsize.bytes = PAGE_ALIGN(size);
+       obj = omap_gem_new(dev, gsize, OMAP_BO_MEM_DMABUF | OMAP_BO_WC);
+       if (!obj) {
+               obj = ERR_PTR(-ENOMEM);
+               goto done;
+       }
+       omap_obj = to_omap_bo(obj);
+       omap_obj->sgt = sgt;
+       if (sgt->orig_nents == 1) {
+               omap_obj->paddr = sg_dma_address(sgt->sgl);
+       } else {
+               /* Create pages list from sgt */
+               struct sg_page_iter iter;
+               struct page **pages;
+               unsigned int npages;
+               unsigned int i = 0;
+               npages = DIV_ROUND_UP(size, PAGE_SIZE);
+               pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
+               if (!pages) {
+                       omap_gem_free_object(obj);
+                       obj = ERR_PTR(-ENOMEM);
+                       goto done;
+               }
+               omap_obj->pages = pages;
+               for_each_sg_page(sgt->sgl, &iter, sgt->orig_nents, 0) {
+                       if (i < npages)
+                               pages[i] = sg_page_iter_page(&iter);
+                       i++;
+               }
+               if (WARN_ON(i != npages)) {
+                       omap_gem_free_object(obj);
+                       obj = ERR_PTR(-ENOMEM);
+                       goto done;
+               }
+       }
+ done:
+       mutex_unlock(&dev->struct_mutex);
+       return obj;
+ }
  /* convenience method to construct a GEM buffer object, and userspace handle */
  int omap_gem_new_handle(struct drm_device *dev, struct drm_file *file,
                union omap_gem_size gsize, uint32_t flags, uint32_t *handle)
index bd736ace3f8147b25c3362cb2e46aeeef2ef87b8,82b5711fefeffdd28f7c06eeec3979d94159f583..3abb400151acaf304652044013f55fd0091e6428
@@@ -5,12 -5,10 +5,10 @@@
   *          for STMicroelectronics.
   * License terms:  GNU General Public License (GPL), version 2
   */
- #include <drm/drmP.h>
  
- #include <drm/drm_atomic_helper.h>
+ #include <drm/drm_atomic.h>
  #include <drm/drm_fb_cma_helper.h>
  #include <drm/drm_gem_cma_helper.h>
- #include <drm/drm_plane_helper.h>
  
  #include "sti_compositor.h"
  #include "sti_cursor.h"
@@@ -74,6 -72,82 +72,82 @@@ static const uint32_t cursor_supported_
  
  #define to_sti_cursor(x) container_of(x, struct sti_cursor, plane)
  
+ #define DBGFS_DUMP(reg) seq_printf(s, "\n  %-25s 0x%08X", #reg, \
+                                  readl(cursor->regs + reg))
+ static void cursor_dbg_vpo(struct seq_file *s, u32 val)
+ {
+       seq_printf(s, "\txdo:%4d\tydo:%4d", val & 0x0FFF, (val >> 16) & 0x0FFF);
+ }
+ static void cursor_dbg_size(struct seq_file *s, u32 val)
+ {
+       seq_printf(s, "\t%d x %d", val & 0x07FF, (val >> 16) & 0x07FF);
+ }
+ static void cursor_dbg_pml(struct seq_file *s,
+                          struct sti_cursor *cursor, u32 val)
+ {
+       if (cursor->pixmap.paddr == val)
+               seq_printf(s, "\tVirt @: %p", cursor->pixmap.base);
+ }
+ static void cursor_dbg_cml(struct seq_file *s,
+                          struct sti_cursor *cursor, u32 val)
+ {
+       if (cursor->clut_paddr == val)
+               seq_printf(s, "\tVirt @: %p", cursor->clut);
+ }
+ static int cursor_dbg_show(struct seq_file *s, void *data)
+ {
+       struct drm_info_node *node = s->private;
+       struct sti_cursor *cursor = (struct sti_cursor *)node->info_ent->data;
+       struct drm_device *dev = node->minor->dev;
+       int ret;
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+       seq_printf(s, "%s: (vaddr = 0x%p)",
+                  sti_plane_to_str(&cursor->plane), cursor->regs);
+       DBGFS_DUMP(CUR_CTL);
+       DBGFS_DUMP(CUR_VPO);
+       cursor_dbg_vpo(s, readl(cursor->regs + CUR_VPO));
+       DBGFS_DUMP(CUR_PML);
+       cursor_dbg_pml(s, cursor, readl(cursor->regs + CUR_PML));
+       DBGFS_DUMP(CUR_PMP);
+       DBGFS_DUMP(CUR_SIZE);
+       cursor_dbg_size(s, readl(cursor->regs + CUR_SIZE));
+       DBGFS_DUMP(CUR_CML);
+       cursor_dbg_cml(s, cursor, readl(cursor->regs + CUR_CML));
+       DBGFS_DUMP(CUR_AWS);
+       DBGFS_DUMP(CUR_AWE);
+       seq_puts(s, "\n");
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+ }
+ static struct drm_info_list cursor_debugfs_files[] = {
+       { "cursor", cursor_dbg_show, 0, NULL },
+ };
+ static int cursor_debugfs_init(struct sti_cursor *cursor,
+                              struct drm_minor *minor)
+ {
+       unsigned int i;
+       for (i = 0; i < ARRAY_SIZE(cursor_debugfs_files); i++)
+               cursor_debugfs_files[i].data = cursor;
+       return drm_debugfs_create_files(cursor_debugfs_files,
+                                       ARRAY_SIZE(cursor_debugfs_files),
+                                       minor->debugfs_root, minor);
+ }
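
Once registered, the dump is reachable through the usual DRM debugfs layout (typically <debugfs>/dri/<minor>/cursor; the exact path depends on where debugfs is mounted): each DBGFS_DUMP(reg) line prints the register name and raw value, and the cursor_dbg_* helpers append the decoded fields on the same line.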
  static void sti_cursor_argb8888_to_clut8(struct sti_cursor *cursor, u32 *src)
  {
        u8  *dst = cursor->pixmap.base;
@@@ -110,35 -184,31 +184,31 @@@ static void sti_cursor_init(struct sti_
                                                  (b * 5);
  }
  
- static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
-                                    struct drm_plane_state *oldstate)
+ static int sti_cursor_atomic_check(struct drm_plane *drm_plane,
+                                  struct drm_plane_state *state)
  {
-       struct drm_plane_state *state = drm_plane->state;
        struct sti_plane *plane = to_sti_plane(drm_plane);
        struct sti_cursor *cursor = to_sti_cursor(plane);
        struct drm_crtc *crtc = state->crtc;
-       struct sti_mixer *mixer = to_sti_mixer(crtc);
        struct drm_framebuffer *fb = state->fb;
-       struct drm_display_mode *mode = &crtc->mode;
-       int dst_x = state->crtc_x;
-       int dst_y = state->crtc_y;
-       int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
-       int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+       struct drm_crtc_state *crtc_state;
+       struct drm_display_mode *mode;
+       int dst_x, dst_y, dst_w, dst_h;
+       int src_w, src_h;
+       /* no need for further checks if the plane is being disabled */
+       if (!crtc || !fb)
+               return 0;
+       crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
+       mode = &crtc_state->mode;
+       dst_x = state->crtc_x;
+       dst_y = state->crtc_y;
+       dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+       dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
        /* src_x are in 16.16 format */
-       int src_w = state->src_w >> 16;
-       int src_h = state->src_h >> 16;
-       bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
-       struct drm_gem_cma_object *cma_obj;
-       u32 y, x;
-       u32 val;
-       DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
-                     crtc->base.id, sti_mixer_to_str(mixer),
-                     drm_plane->base.id, sti_plane_to_str(plane));
-       DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", dst_w, dst_h, dst_x, dst_y);
-       dev_dbg(cursor->dev, "%s %s\n", __func__,
-               sti_plane_to_str(plane));
+       src_w = state->src_w >> 16;
+       src_h = state->src_h >> 16;
  
        if (src_w < STI_CURS_MIN_SIZE ||
            src_h < STI_CURS_MIN_SIZE ||
            src_h > STI_CURS_MAX_SIZE) {
                DRM_ERROR("Invalid cursor size (%dx%d)\n",
                                src_w, src_h);
-               return;
+               return -EINVAL;
        }
  
        /* If the cursor size has changed, re-allocate the pixmap */
                cursor->height = src_h;
  
                if (cursor->pixmap.base)
 -                      dma_free_writecombine(cursor->dev,
 -                                            cursor->pixmap.size,
 -                                            cursor->pixmap.base,
 -                                            cursor->pixmap.paddr);
 +                      dma_free_wc(cursor->dev, cursor->pixmap.size,
 +                                  cursor->pixmap.base, cursor->pixmap.paddr);
  
                cursor->pixmap.size = cursor->width * cursor->height;
  
 -              cursor->pixmap.base = dma_alloc_writecombine(cursor->dev,
 -                                                      cursor->pixmap.size,
 -                                                      &cursor->pixmap.paddr,
 -                                                      GFP_KERNEL | GFP_DMA);
 +              cursor->pixmap.base = dma_alloc_wc(cursor->dev,
 +                                                 cursor->pixmap.size,
 +                                                 &cursor->pixmap.paddr,
 +                                                 GFP_KERNEL | GFP_DMA);
                if (!cursor->pixmap.base) {
                        DRM_ERROR("Failed to allocate memory for pixmap\n");
-                       return;
+                       return -EINVAL;
                }
        }
  
-       cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
-       if (!cma_obj) {
+       if (!drm_fb_cma_get_gem_obj(fb, 0)) {
                DRM_ERROR("Can't get CMA GEM object for fb\n");
-               return;
+               return -EINVAL;
        }
  
+       DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
+                     crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc)),
+                     drm_plane->base.id, sti_plane_to_str(plane));
+       DRM_DEBUG_KMS("(%dx%d)@(%d,%d)\n", dst_w, dst_h, dst_x, dst_y);
+       return 0;
+ }
+ static void sti_cursor_atomic_update(struct drm_plane *drm_plane,
+                                    struct drm_plane_state *oldstate)
+ {
+       struct drm_plane_state *state = drm_plane->state;
+       struct sti_plane *plane = to_sti_plane(drm_plane);
+       struct sti_cursor *cursor = to_sti_cursor(plane);
+       struct drm_crtc *crtc = state->crtc;
+       struct drm_framebuffer *fb = state->fb;
+       struct drm_display_mode *mode;
+       int dst_x, dst_y;
+       struct drm_gem_cma_object *cma_obj;
+       u32 y, x;
+       u32 val;
+       if (!crtc || !fb)
+               return;
+       mode = &crtc->mode;
+       dst_x = state->crtc_x;
+       dst_y = state->crtc_y;
+       cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
        /* Convert ARGB8888 to CLUT8 */
        sti_cursor_argb8888_to_clut8(cursor, (u32 *)cma_obj->vaddr);
  
        val = y << 16 | x;
        writel(val, cursor->regs + CUR_AWE);
  
-       if (first_prepare) {
-               /* Set and fetch CLUT */
-               writel(cursor->clut_paddr, cursor->regs + CUR_CML);
-               writel(CUR_CTL_CLUT_UPDATE, cursor->regs + CUR_CTL);
-       }
        /* Set memory location, size, and position */
        writel(cursor->pixmap.paddr, cursor->regs + CUR_PML);
        writel(cursor->width, cursor->regs + CUR_PMP);
        writel(cursor->height << 16 | cursor->width, cursor->regs + CUR_SIZE);
  
        y = sti_vtg_get_line_number(*mode, dst_y);
-       x = sti_vtg_get_pixel_number(*mode, dst_y);
+       x = sti_vtg_get_pixel_number(*mode, dst_x);
        writel((y << 16) | x, cursor->regs + CUR_VPO);
  
+       /* Set and fetch CLUT */
+       writel(cursor->clut_paddr, cursor->regs + CUR_CML);
+       writel(CUR_CTL_CLUT_UPDATE, cursor->regs + CUR_CTL);
+       sti_plane_update_fps(plane, true, false);
        plane->status = STI_PLANE_UPDATED;
  }
  
@@@ -213,7 -315,6 +313,6 @@@ static void sti_cursor_atomic_disable(s
                                      struct drm_plane_state *oldstate)
  {
        struct sti_plane *plane = to_sti_plane(drm_plane);
-       struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
  
        if (!drm_plane->crtc) {
                DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
        }
  
        DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
-                        drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
+                        drm_plane->crtc->base.id,
+                        sti_mixer_to_str(to_sti_mixer(drm_plane->crtc)),
                         drm_plane->base.id, sti_plane_to_str(plane));
  
        plane->status = STI_PLANE_DISABLING;
  }
  
  static const struct drm_plane_helper_funcs sti_cursor_helpers_funcs = {
+       .atomic_check = sti_cursor_atomic_check,
        .atomic_update = sti_cursor_atomic_update,
        .atomic_disable = sti_cursor_atomic_disable,
  };
@@@ -250,8 -353,8 +351,8 @@@ struct drm_plane *sti_cursor_create(str
  
        /* Allocate clut buffer */
        size = 0x100 * sizeof(unsigned short);
 -      cursor->clut = dma_alloc_writecombine(dev, size, &cursor->clut_paddr,
 -                                            GFP_KERNEL | GFP_DMA);
 +      cursor->clut = dma_alloc_wc(dev, size, &cursor->clut_paddr,
 +                                  GFP_KERNEL | GFP_DMA);
  
        if (!cursor->clut) {
                DRM_ERROR("Failed to allocate memory for cursor clut\n");
  
        sti_plane_init_property(&cursor->plane, DRM_PLANE_TYPE_CURSOR);
  
+       if (cursor_debugfs_init(cursor, drm_dev->primary))
+               DRM_ERROR("CURSOR debugfs setup failed\n");
        return &cursor->plane.drm_plane;
  
  err_plane:
 -      dma_free_writecombine(dev, size, cursor->clut, cursor->clut_paddr);
 +      dma_free_wc(dev, size, cursor->clut, cursor->clut_paddr);
  err_clut:
        devm_kfree(dev, cursor);
        return NULL;
index 514551c857bbda87b8fcbbca215bdc17c7bdfe67,67f606a41c3f0c0877c9a40091ab6d30b5e9255b..ff3d3e7e770418f2830a474ab03793020161ab39
@@@ -6,9 -6,7 +6,7 @@@
   * License terms:  GNU General Public License (GPL), version 2
   */
  
- #include <linux/clk.h>
- #include <linux/dma-mapping.h>
+ #include <drm/drm_atomic.h>
  #include <drm/drm_fb_cma_helper.h>
  #include <drm/drm_gem_cma_helper.h>
  
  #define GDP_ABGR8888    (GDP_ARGB8888 | BIGNOTLITTLE | ALPHASWITCH)
  #define GDP_ARGB1555    0x06
  #define GDP_ARGB4444    0x07
- #define GDP_CLUT8       0x0B
- #define GDP_YCBR888     0x10
- #define GDP_YCBR422R    0x12
- #define GDP_AYCBR8888   0x15
+ #define GDP2STR(fmt) { GDP_ ## fmt, #fmt }
+ static struct gdp_format_to_str {
+       int format;
+       char name[20];
+ } gdp_format_to_str[] = {
+               GDP2STR(RGB565),
+               GDP2STR(RGB888),
+               GDP2STR(RGB888_32),
+               GDP2STR(XBGR8888),
+               GDP2STR(ARGB8565),
+               GDP2STR(ARGB8888),
+               GDP2STR(ABGR8888),
+               GDP2STR(ARGB1555),
+               GDP2STR(ARGB4444)
+ };
  
  #define GAM_GDP_CTL_OFFSET      0x00
  #define GAM_GDP_AGC_OFFSET      0x04
@@@ -97,6 -108,7 +108,7 @@@ struct sti_gdp_node_list 
   * @vtg_field_nb:       callback for VTG FIELD (top or bottom) notification
   * @is_curr_top:        true if the current node processed is the top field
   * @node_list:          array of node list
+  * @vtg:                registered vtg
   */
  struct sti_gdp {
        struct sti_plane plane;
        struct notifier_block vtg_field_nb;
        bool is_curr_top;
        struct sti_gdp_node_list node_list[GDP_NODE_NB_BANK];
+       struct sti_vtg *vtg;
  };
  
  #define to_sti_gdp(x) container_of(x, struct sti_gdp, plane)
@@@ -121,12 -134,224 +134,224 @@@ static const uint32_t gdp_supported_for
        DRM_FORMAT_ARGB1555,
        DRM_FORMAT_RGB565,
        DRM_FORMAT_RGB888,
-       DRM_FORMAT_AYUV,
-       DRM_FORMAT_YUV444,
-       DRM_FORMAT_VYUY,
-       DRM_FORMAT_C8,
  };
  
+ #define DBGFS_DUMP(reg) seq_printf(s, "\n  %-25s 0x%08X", #reg, \
+                                  readl(gdp->regs + reg ## _OFFSET))
+ static void gdp_dbg_ctl(struct seq_file *s, int val)
+ {
+       int i;
+       seq_puts(s, "\tColor:");
+       for (i = 0; i < ARRAY_SIZE(gdp_format_to_str); i++) {
+               if (gdp_format_to_str[i].format == (val & 0x1F)) {
+                       seq_puts(s, gdp_format_to_str[i].name);
+                       break;
+               }
+       }
+       if (i == ARRAY_SIZE(gdp_format_to_str))
+               seq_puts(s, "<UNKNOWN>");
+       seq_printf(s, "\tWaitNextVsync:%d", val & WAIT_NEXT_VSYNC ? 1 : 0);
+ }
+ static void gdp_dbg_vpo(struct seq_file *s, int val)
+ {
+       seq_printf(s, "\txdo:%4d\tydo:%4d", val & 0xFFFF, (val >> 16) & 0xFFFF);
+ }
+ static void gdp_dbg_vps(struct seq_file *s, int val)
+ {
+       seq_printf(s, "\txds:%4d\tyds:%4d", val & 0xFFFF, (val >> 16) & 0xFFFF);
+ }
+ static void gdp_dbg_size(struct seq_file *s, int val)
+ {
+       seq_printf(s, "\t%d x %d", val & 0xFFFF, (val >> 16) & 0xFFFF);
+ }
+ static void gdp_dbg_nvn(struct seq_file *s, struct sti_gdp *gdp, int val)
+ {
+       void *base = NULL;
+       unsigned int i;
+       for (i = 0; i < GDP_NODE_NB_BANK; i++) {
+               if (gdp->node_list[i].top_field_paddr == val) {
+                       base = gdp->node_list[i].top_field;
+                       break;
+               }
+               if (gdp->node_list[i].btm_field_paddr == val) {
+                       base = gdp->node_list[i].btm_field;
+                       break;
+               }
+       }
+       if (base)
+               seq_printf(s, "\tVirt @: %p", base);
+ }
+ static void gdp_dbg_ppt(struct seq_file *s, int val)
+ {
+       if (val & GAM_GDP_PPT_IGNORE)
+               seq_puts(s, "\tNot displayed on mixer!");
+ }
+ static void gdp_dbg_mst(struct seq_file *s, int val)
+ {
+       if (val & 1)
+               seq_puts(s, "\tBUFFER UNDERFLOW!");
+ }
+ static int gdp_dbg_show(struct seq_file *s, void *data)
+ {
+       struct drm_info_node *node = s->private;
+       struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
+       struct drm_device *dev = node->minor->dev;
+       struct drm_plane *drm_plane = &gdp->plane.drm_plane;
+       struct drm_crtc *crtc = drm_plane->crtc;
+       int ret;
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+       seq_printf(s, "%s: (vaddr = 0x%p)",
+                  sti_plane_to_str(&gdp->plane), gdp->regs);
+       DBGFS_DUMP(GAM_GDP_CTL);
+       gdp_dbg_ctl(s, readl(gdp->regs + GAM_GDP_CTL_OFFSET));
+       DBGFS_DUMP(GAM_GDP_AGC);
+       DBGFS_DUMP(GAM_GDP_VPO);
+       gdp_dbg_vpo(s, readl(gdp->regs + GAM_GDP_VPO_OFFSET));
+       DBGFS_DUMP(GAM_GDP_VPS);
+       gdp_dbg_vps(s, readl(gdp->regs + GAM_GDP_VPS_OFFSET));
+       DBGFS_DUMP(GAM_GDP_PML);
+       DBGFS_DUMP(GAM_GDP_PMP);
+       DBGFS_DUMP(GAM_GDP_SIZE);
+       gdp_dbg_size(s, readl(gdp->regs + GAM_GDP_SIZE_OFFSET));
+       DBGFS_DUMP(GAM_GDP_NVN);
+       gdp_dbg_nvn(s, gdp, readl(gdp->regs + GAM_GDP_NVN_OFFSET));
+       DBGFS_DUMP(GAM_GDP_KEY1);
+       DBGFS_DUMP(GAM_GDP_KEY2);
+       DBGFS_DUMP(GAM_GDP_PPT);
+       gdp_dbg_ppt(s, readl(gdp->regs + GAM_GDP_PPT_OFFSET));
+       DBGFS_DUMP(GAM_GDP_CML);
+       DBGFS_DUMP(GAM_GDP_MST);
+       gdp_dbg_mst(s, readl(gdp->regs + GAM_GDP_MST_OFFSET));
+       seq_puts(s, "\n\n");
+       if (!crtc)
+               seq_puts(s, "  Not connected to any DRM CRTC\n");
+       else
+               seq_printf(s, "  Connected to DRM CRTC #%d (%s)\n",
+                          crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc)));
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+ }
+ static void gdp_node_dump_node(struct seq_file *s, struct sti_gdp_node *node)
+ {
+       seq_printf(s, "\t@:0x%p", node);
+       seq_printf(s, "\n\tCTL  0x%08X", node->gam_gdp_ctl);
+       gdp_dbg_ctl(s, node->gam_gdp_ctl);
+       seq_printf(s, "\n\tAGC  0x%08X", node->gam_gdp_agc);
+       seq_printf(s, "\n\tVPO  0x%08X", node->gam_gdp_vpo);
+       gdp_dbg_vpo(s, node->gam_gdp_vpo);
+       seq_printf(s, "\n\tVPS  0x%08X", node->gam_gdp_vps);
+       gdp_dbg_vps(s, node->gam_gdp_vps);
+       seq_printf(s, "\n\tPML  0x%08X", node->gam_gdp_pml);
+       seq_printf(s, "\n\tPMP  0x%08X", node->gam_gdp_pmp);
+       seq_printf(s, "\n\tSIZE 0x%08X", node->gam_gdp_size);
+       gdp_dbg_size(s, node->gam_gdp_size);
+       seq_printf(s, "\n\tNVN  0x%08X", node->gam_gdp_nvn);
+       seq_printf(s, "\n\tKEY1 0x%08X", node->gam_gdp_key1);
+       seq_printf(s, "\n\tKEY2 0x%08X", node->gam_gdp_key2);
+       seq_printf(s, "\n\tPPT  0x%08X", node->gam_gdp_ppt);
+       gdp_dbg_ppt(s, node->gam_gdp_ppt);
+       seq_printf(s, "\n\tCML  0x%08X", node->gam_gdp_cml);
+       seq_puts(s, "\n");
+ }
+ static int gdp_node_dbg_show(struct seq_file *s, void *arg)
+ {
+       struct drm_info_node *node = s->private;
+       struct sti_gdp *gdp = (struct sti_gdp *)node->info_ent->data;
+       struct drm_device *dev = node->minor->dev;
+       unsigned int b;
+       int ret;
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+       for (b = 0; b < GDP_NODE_NB_BANK; b++) {
+               seq_printf(s, "\n%s[%d].top", sti_plane_to_str(&gdp->plane), b);
+               gdp_node_dump_node(s, gdp->node_list[b].top_field);
+               seq_printf(s, "\n%s[%d].btm", sti_plane_to_str(&gdp->plane), b);
+               gdp_node_dump_node(s, gdp->node_list[b].btm_field);
+       }
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+ }
+ static struct drm_info_list gdp0_debugfs_files[] = {
+       { "gdp0", gdp_dbg_show, 0, NULL },
+       { "gdp0_node", gdp_node_dbg_show, 0, NULL },
+ };
+ static struct drm_info_list gdp1_debugfs_files[] = {
+       { "gdp1", gdp_dbg_show, 0, NULL },
+       { "gdp1_node", gdp_node_dbg_show, 0, NULL },
+ };
+ static struct drm_info_list gdp2_debugfs_files[] = {
+       { "gdp2", gdp_dbg_show, 0, NULL },
+       { "gdp2_node", gdp_node_dbg_show, 0, NULL },
+ };
+ static struct drm_info_list gdp3_debugfs_files[] = {
+       { "gdp3", gdp_dbg_show, 0, NULL },
+       { "gdp3_node", gdp_node_dbg_show, 0, NULL },
+ };
+ static int gdp_debugfs_init(struct sti_gdp *gdp, struct drm_minor *minor)
+ {
+       unsigned int i;
+       struct drm_info_list *gdp_debugfs_files;
+       int nb_files;
+       switch (gdp->plane.desc) {
+       case STI_GDP_0:
+               gdp_debugfs_files = gdp0_debugfs_files;
+               nb_files = ARRAY_SIZE(gdp0_debugfs_files);
+               break;
+       case STI_GDP_1:
+               gdp_debugfs_files = gdp1_debugfs_files;
+               nb_files = ARRAY_SIZE(gdp1_debugfs_files);
+               break;
+       case STI_GDP_2:
+               gdp_debugfs_files = gdp2_debugfs_files;
+               nb_files = ARRAY_SIZE(gdp2_debugfs_files);
+               break;
+       case STI_GDP_3:
+               gdp_debugfs_files = gdp3_debugfs_files;
+               nb_files = ARRAY_SIZE(gdp3_debugfs_files);
+               break;
+       default:
+               return -EINVAL;
+       }
+       for (i = 0; i < nb_files; i++)
+               gdp_debugfs_files[i].data = gdp;
+       return drm_debugfs_create_files(gdp_debugfs_files,
+                                       nb_files,
+                                       minor->debugfs_root, minor);
+ }
  static int sti_gdp_fourcc2format(int fourcc)
  {
        switch (fourcc) {
                return GDP_RGB565;
        case DRM_FORMAT_RGB888:
                return GDP_RGB888;
-       case DRM_FORMAT_AYUV:
-               return GDP_AYCBR8888;
-       case DRM_FORMAT_YUV444:
-               return GDP_YCBR888;
-       case DRM_FORMAT_VYUY:
-               return GDP_YCBR422R;
-       case DRM_FORMAT_C8:
-               return GDP_CLUT8;
        }
        return -1;
  }
@@@ -163,7 -380,6 +380,6 @@@ static int sti_gdp_get_alpharange(int f
        switch (format) {
        case GDP_ARGB8565:
        case GDP_ARGB8888:
-       case GDP_AYCBR8888:
        case GDP_ABGR8888:
                return GAM_GDP_ALPHARANGE_255;
        }
@@@ -240,9 -456,6 +456,6 @@@ end
   */
  static void sti_gdp_disable(struct sti_gdp *gdp)
  {
-       struct drm_plane *drm_plane = &gdp->plane.drm_plane;
-       struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
-       struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
        unsigned int i;
  
        DRM_DEBUG_DRIVER("%s\n", sti_plane_to_str(&gdp->plane));
                gdp->node_list[i].btm_field->gam_gdp_ppt |= GAM_GDP_PPT_IGNORE;
        }
  
-       if (sti_vtg_unregister_client(mixer->id == STI_MIXER_MAIN ?
-                       compo->vtg_main : compo->vtg_aux, &gdp->vtg_field_nb))
+       if (sti_vtg_unregister_client(gdp->vtg, &gdp->vtg_field_nb))
                DRM_DEBUG_DRIVER("Warning: cannot unregister VTG notifier\n");
  
        if (gdp->clk_pix)
@@@ -312,7 -524,8 +524,7 @@@ static void sti_gdp_init(struct sti_gd
        /* Allocate all the nodes within a single memory page */
        size = sizeof(struct sti_gdp_node) *
            GDP_NODE_PER_FIELD * GDP_NODE_NB_BANK;
 -      base = dma_alloc_writecombine(gdp->dev,
 -                                    size, &dma_addr, GFP_KERNEL | GFP_DMA);
 +      base = dma_alloc_wc(gdp->dev, size, &dma_addr, GFP_KERNEL | GFP_DMA);
  
        if (!base) {
                DRM_ERROR("Failed to allocate memory for GDP node\n");
        }
  }
  
- static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
-                                 struct drm_plane_state *oldstate)
+ /**
+  * sti_gdp_get_dst
+  * @dev: device
+  * @dst: requested destination size
+  * @src: source size
+  *
+  * Return the cropped / clamped destination size
+  *
+  * RETURNS:
+  * cropped / clamped destination size
+  */
+ static int sti_gdp_get_dst(struct device *dev, int dst, int src)
+ {
+       if (dst == src)
+               return dst;
+       if (dst < src) {
+               dev_dbg(dev, "WARNING: GDP scale not supported, will crop\n");
+               return dst;
+       }
+       dev_dbg(dev, "WARNING: GDP scale not supported, will clamp\n");
+       return src;
+ }
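
Concretely, with illustrative numbers:

        /*
         * Worked examples for sti_gdp_get_dst():
         *   dst == src: sti_gdp_get_dst(dev, 720, 720) -> 720  (pass-through)
         *   dst <  src: sti_gdp_get_dst(dev, 640, 720) -> 640  (source cropped)
         *   dst >  src: sti_gdp_get_dst(dev, 720, 640) -> 640  (dest clamped)
         * The GDP never scales; the effective size is min(dst, src).
         */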
+ static int sti_gdp_atomic_check(struct drm_plane *drm_plane,
+                               struct drm_plane_state *state)
  {
-       struct drm_plane_state *state = drm_plane->state;
        struct sti_plane *plane = to_sti_plane(drm_plane);
        struct sti_gdp *gdp = to_sti_gdp(plane);
        struct drm_crtc *crtc = state->crtc;
        struct sti_compositor *compo = dev_get_drvdata(gdp->dev);
        struct drm_framebuffer *fb =  state->fb;
        bool first_prepare = plane->status == STI_PLANE_DISABLED;
+       struct drm_crtc_state *crtc_state;
        struct sti_mixer *mixer;
        struct drm_display_mode *mode;
        int dst_x, dst_y, dst_w, dst_h;
        int src_x, src_y, src_w, src_h;
+       int format;
+       /* no need for further checks if the plane is being disabled */
+       if (!crtc || !fb)
+               return 0;
+       mixer = to_sti_mixer(crtc);
+       crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
+       mode = &crtc_state->mode;
+       dst_x = state->crtc_x;
+       dst_y = state->crtc_y;
+       dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+       dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+       /* src_x are in 16.16 format */
+       src_x = state->src_x >> 16;
+       src_y = state->src_y >> 16;
+       src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX);
+       src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX);
+       format = sti_gdp_fourcc2format(fb->pixel_format);
+       if (format == -1) {
+               DRM_ERROR("Format not supported by GDP %.4s\n",
+                         (char *)&fb->pixel_format);
+               return -EINVAL;
+       }
+       if (!drm_fb_cma_get_gem_obj(fb, 0)) {
+               DRM_ERROR("Can't get CMA GEM object for fb\n");
+               return -EINVAL;
+       }
+
+       if (first_prepare) {
+               /* Register gdp callback */
+               gdp->vtg = mixer->id == STI_MIXER_MAIN ?
+                                       compo->vtg_main : compo->vtg_aux;
+               if (sti_vtg_register_client(gdp->vtg,
+                                           &gdp->vtg_field_nb, crtc)) {
+                       DRM_ERROR("Cannot register VTG notifier\n");
+                       return -EINVAL;
+               }
+
+               /* Set and enable gdp clock */
+               if (gdp->clk_pix) {
+                       struct clk *clkp;
+                       int rate = mode->clock * 1000;
+                       int res;
+
+                       /*
+                        * According to the mixer used, the gdp pixel clock
+                        * should have a different parent clock.
+                        */
+                       if (mixer->id == STI_MIXER_MAIN)
+                               clkp = gdp->clk_main_parent;
+                       else
+                               clkp = gdp->clk_aux_parent;
+                       if (clkp)
+                               clk_set_parent(gdp->clk_pix, clkp);
+                       res = clk_set_rate(gdp->clk_pix, rate);
+                       if (res < 0) {
+                               DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
+                                         rate);
+                               return -EINVAL;
+                       }
+                       if (clk_prepare_enable(gdp->clk_pix)) {
+                               DRM_ERROR("Failed to prepare/enable gdp\n");
+                               return -EINVAL;
+                       }
+               }
+       }
+
+       DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
+                     crtc->base.id, sti_mixer_to_str(mixer),
+                     drm_plane->base.id, sti_plane_to_str(plane));
+       DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
+                     sti_plane_to_str(plane),
+                     dst_w, dst_h, dst_x, dst_y,
+                     src_w, src_h, src_x, src_y);
+
+       return 0;
+ }
+
+ static void sti_gdp_atomic_update(struct drm_plane *drm_plane,
+                                 struct drm_plane_state *oldstate)
+ {
+       struct drm_plane_state *state = drm_plane->state;
+       struct sti_plane *plane = to_sti_plane(drm_plane);
+       struct sti_gdp *gdp = to_sti_gdp(plane);
+       struct drm_crtc *crtc = state->crtc;
+       struct drm_framebuffer *fb = state->fb;
+       struct drm_display_mode *mode;
+       int dst_x, dst_y, dst_w, dst_h;
+       int src_x, src_y, src_w, src_h;
        struct drm_gem_cma_object *cma_obj;
        struct sti_gdp_node_list *list;
        struct sti_gdp_node_list *curr_list;
        int format;
        unsigned int depth, bpp;
        u32 ydo, xdo, yds, xds;
-       int res;
  
-       /* Manage the case where crtc is null (disabled) */
-       if (!crtc)
+       if (!crtc || !fb)
                return;
  
-       mixer = to_sti_mixer(crtc);
        mode = &crtc->mode;
        dst_x = state->crtc_x;
        dst_y = state->crtc_y;
        /* src coordinates are 16.16 fixed point: >> 16 gives pixels */
        src_x = state->src_x >> 16;
        src_y = state->src_y >> 16;
-       src_w = state->src_w >> 16;
-       src_h = state->src_h >> 16;
-       DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
-                     crtc->base.id, sti_mixer_to_str(mixer),
-                     drm_plane->base.id, sti_plane_to_str(plane));
-       DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
-                     sti_plane_to_str(plane),
-                     dst_w, dst_h, dst_x, dst_y,
-                     src_w, src_h, src_x, src_y);
+       src_w = clamp_val(state->src_w >> 16, 0, GAM_GDP_SIZE_MAX);
+       src_h = clamp_val(state->src_h >> 16, 0, GAM_GDP_SIZE_MAX);
  
        list = sti_gdp_get_free_nodes(gdp);
        top_field = list->top_field;
        top_field->gam_gdp_agc = GAM_GDP_AGC_FULL_RANGE;
        top_field->gam_gdp_ctl = WAIT_NEXT_VSYNC;
        format = sti_gdp_fourcc2format(fb->pixel_format);
-       if (format == -1) {
-               DRM_ERROR("Format not supported by GDP %.4s\n",
-                         (char *)&fb->pixel_format);
-               return;
-       }
        top_field->gam_gdp_ctl |= format;
        top_field->gam_gdp_ctl |= sti_gdp_get_alpharange(format);
        top_field->gam_gdp_ppt &= ~GAM_GDP_PPT_IGNORE;
  
        cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
-       if (!cma_obj) {
-               DRM_ERROR("Can't get CMA GEM object for fb\n");
-               return;
-       }
  
        DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
                         (char *)&fb->pixel_format,
        top_field->gam_gdp_pml += src_x * (bpp >> 3);
        top_field->gam_gdp_pml += src_y * fb->pitches[0];
  
-       /* input parameters */
-       top_field->gam_gdp_pmp = fb->pitches[0];
-       top_field->gam_gdp_size = clamp_val(src_h, 0, GAM_GDP_SIZE_MAX) << 16 |
-                                 clamp_val(src_w, 0, GAM_GDP_SIZE_MAX);
-       /* output parameters */
+       /* output parameters (clamped / cropped) */
+       dst_w = sti_gdp_get_dst(gdp->dev, dst_w, src_w);
+       dst_h = sti_gdp_get_dst(gdp->dev, dst_h, src_h);
        ydo = sti_vtg_get_line_number(*mode, dst_y);
        yds = sti_vtg_get_line_number(*mode, dst_y + dst_h - 1);
        xdo = sti_vtg_get_pixel_number(*mode, dst_x);
        top_field->gam_gdp_vpo = (ydo << 16) | xdo;
        top_field->gam_gdp_vps = (yds << 16) | xds;
  
+       /* input parameters */
+       src_w = dst_w;
+       top_field->gam_gdp_pmp = fb->pitches[0];
+       top_field->gam_gdp_size = src_h << 16 | src_w;
        /* Same content and chained together */
        memcpy(btm_field, top_field, sizeof(*btm_field));
        top_field->gam_gdp_nvn = list->btm_field_paddr;
                btm_field->gam_gdp_pml = top_field->gam_gdp_pml +
                                         fb->pitches[0];
  
-       if (first_prepare) {
-               /* Register gdp callback */
-               if (sti_vtg_register_client(mixer->id == STI_MIXER_MAIN ?
-                               compo->vtg_main : compo->vtg_aux,
-                               &gdp->vtg_field_nb, crtc)) {
-                       DRM_ERROR("Cannot register VTG notifier\n");
-                       return;
-               }
-               /* Set and enable gdp clock */
-               if (gdp->clk_pix) {
-                       struct clk *clkp;
-                       int rate = mode->clock * 1000;
-                       /* According to the mixer used, the gdp pixel clock
-                        * should have a different parent clock. */
-                       if (mixer->id == STI_MIXER_MAIN)
-                               clkp = gdp->clk_main_parent;
-                       else
-                               clkp = gdp->clk_aux_parent;
-                       if (clkp)
-                               clk_set_parent(gdp->clk_pix, clkp);
-                       res = clk_set_rate(gdp->clk_pix, rate);
-                       if (res < 0) {
-                               DRM_ERROR("Cannot set rate (%dHz) for gdp\n",
-                                         rate);
-                               return;
-                       }
-                       if (clk_prepare_enable(gdp->clk_pix)) {
-                               DRM_ERROR("Failed to prepare/enable gdp\n");
-                               return;
-                       }
-               }
-       }
        /* Update the NVN field of the 'right' field of the current GDP node
         * (being used by the HW) with the address of the updated ('free') top
         * field GDP node.
        }
  
  end:
+       sti_plane_update_fps(plane, true, false);
        plane->status = STI_PLANE_UPDATED;
  }
  
@@@ -580,7 -859,6 +858,6 @@@ static void sti_gdp_atomic_disable(stru
                                   struct drm_plane_state *oldstate)
  {
        struct sti_plane *plane = to_sti_plane(drm_plane);
-       struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
  
        if (!drm_plane->crtc) {
                DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
        }
  
        DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
-                        drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
+                        drm_plane->crtc->base.id,
+                        sti_mixer_to_str(to_sti_mixer(drm_plane->crtc)),
                         drm_plane->base.id, sti_plane_to_str(plane));
  
        plane->status = STI_PLANE_DISABLING;
  }
  
  static const struct drm_plane_helper_funcs sti_gdp_helpers_funcs = {
+       .atomic_check = sti_gdp_atomic_check,
        .atomic_update = sti_gdp_atomic_update,
        .atomic_disable = sti_gdp_atomic_disable,
  };
@@@ -639,6 -919,9 +918,9 @@@ struct drm_plane *sti_gdp_create(struc
  
        sti_plane_init_property(&gdp->plane, type);
  
+       if (gdp_debugfs_init(gdp, drm_dev->primary))
+               DRM_ERROR("GDP debugfs setup failed\n");
        return &gdp->plane.drm_plane;
  
  err:
index 1d3c3d029603d030ec02809fd14026d6d5df08da,d7c1f427811d919d32c92b520a7967f6748d07c4..e05b0dc523ff93f3309c2aea9c2de032e7971636
@@@ -4,14 -4,11 +4,11 @@@
   * License terms:  GNU General Public License (GPL), version 2
   */
  
- #include <linux/clk.h>
  #include <linux/component.h>
  #include <linux/firmware.h>
- #include <linux/module.h>
- #include <linux/platform_device.h>
  #include <linux/reset.h>
  
- #include <drm/drmP.h>
+ #include <drm/drm_atomic.h>
  #include <drm/drm_fb_cma_helper.h>
  #include <drm/drm_gem_cma_helper.h>
  
@@@ -329,8 -326,6 +326,6 @@@ struct sti_hqvdp_cmd 
   * @reset:             reset control
   * @vtg_nb:            notifier to handle VTG Vsync
   * @btm_field_pending: is there any bottom field (interlaced frame) to display
-  * @curr_field_count:  number of field updates
-  * @last_field_count:  number of field updates since last fps measure
   * @hqvdp_cmd:         buffer of commands
   * @hqvdp_cmd_paddr:   physical address of hqvdp_cmd
   * @vtg:               vtg for main data path
@@@ -346,10 -341,8 +341,8 @@@ struct sti_hqvdp 
        struct reset_control *reset;
        struct notifier_block vtg_nb;
        bool btm_field_pending;
-       unsigned int curr_field_count;
-       unsigned int last_field_count;
        void *hqvdp_cmd;
-       dma_addr_t hqvdp_cmd_paddr;
+       u32 hqvdp_cmd_paddr;
        struct sti_vtg *vtg;
        bool xp70_initialized;
  };
@@@ -372,8 -365,8 +365,8 @@@ static const uint32_t hqvdp_supported_f
   */
  static int sti_hqvdp_get_free_cmd(struct sti_hqvdp *hqvdp)
  {
-       int curr_cmd, next_cmd;
-       dma_addr_t cmd = hqvdp->hqvdp_cmd_paddr;
+       u32 curr_cmd, next_cmd;
+       u32 cmd = hqvdp->hqvdp_cmd_paddr;
        int i;
  
        curr_cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
   */
  static int sti_hqvdp_get_curr_cmd(struct sti_hqvdp *hqvdp)
  {
-       int curr_cmd;
-       dma_addr_t cmd = hqvdp->hqvdp_cmd_paddr;
+       u32 curr_cmd;
+       u32 cmd = hqvdp->hqvdp_cmd_paddr;
        unsigned int i;
  
        curr_cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
        return -1;
  }
  
+ /**
+  * sti_hqvdp_get_next_cmd
+  * @hqvdp: hqvdp structure
+  *
+  * Look for the next hqvdp_cmd that will be used by the FW.
+  *
+  * RETURNS:
+  *  the offset of the next command that will be used.
+  * -1 in error cases
+  */
+ static int sti_hqvdp_get_next_cmd(struct sti_hqvdp *hqvdp)
+ {
+       u32 next_cmd;
+       u32 cmd = hqvdp->hqvdp_cmd_paddr;
+       unsigned int i;
+
+       next_cmd = readl(hqvdp->regs + HQVDP_MBX_NEXT_CMD);
+       for (i = 0; i < NB_VDP_CMD; i++) {
+               if (cmd == next_cmd)
+                       return i * sizeof(struct sti_hqvdp_cmd);
+               cmd += sizeof(struct sti_hqvdp_cmd);
+       }
+       return -1;
+ }
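+
+ /*
+  * For example (illustrative): with NB_VDP_CMD commands laid out
+  * contiguously from hqvdp_cmd_paddr, a mailbox value of
+  * hqvdp_cmd_paddr + 2 * sizeof(struct sti_hqvdp_cmd) maps to offset
+  * 2 * sizeof(struct sti_hqvdp_cmd); a value outside the array gives -1.
+  */
+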
+ #define DBGFS_DUMP(reg) seq_printf(s, "\n  %-25s 0x%08X", #reg, \
+                                  readl(hqvdp->regs + reg))
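+
+ /*
+  * DBGFS_DUMP(HQVDP_MBX_INFO_XP70), for instance, expands to
+  * seq_printf(s, "\n  %-25s 0x%08X", "HQVDP_MBX_INFO_XP70",
+  * readl(hqvdp->regs + HQVDP_MBX_INFO_XP70)); the #reg stringification
+  * prints the register name next to its value.
+  */
+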
+ static const char *hqvdp_dbg_get_lut(u32 *coef)
+ {
+       if (!memcmp(coef, coef_lut_a_legacy, 16))
+               return "LUT A";
+       if (!memcmp(coef, coef_lut_b, 16))
+               return "LUT B";
+       if (!memcmp(coef, coef_lut_c_y_legacy, 16))
+               return "LUT C Y";
+       if (!memcmp(coef, coef_lut_c_c_legacy, 16))
+               return "LUT C C";
+       if (!memcmp(coef, coef_lut_d_y_legacy, 16))
+               return "LUT D Y";
+       if (!memcmp(coef, coef_lut_d_c_legacy, 16))
+               return "LUT D C";
+       if (!memcmp(coef, coef_lut_e_y_legacy, 16))
+               return "LUT E Y";
+       if (!memcmp(coef, coef_lut_e_c_legacy, 16))
+               return "LUT E C";
+       if (!memcmp(coef, coef_lut_f_y_legacy, 16))
+               return "LUT F Y";
+       if (!memcmp(coef, coef_lut_f_c_legacy, 16))
+               return "LUT F C";
+       return "<UNKNOWN>";
+ }
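+
+ /*
+  * Note: only the first 16 bytes (four u32 coefficients) of each table
+  * are compared, on the assumption that these are enough to tell the
+  * LUTs apart.
+  */
+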
+ static void hqvdp_dbg_dump_cmd(struct seq_file *s, struct sti_hqvdp_cmd *c)
+ {
+       int src_w, src_h, dst_w, dst_h;
+
+       seq_puts(s, "\n\tTOP:");
+       seq_printf(s, "\n\t %-20s 0x%08X", "Config", c->top.config);
+       switch (c->top.config) {
+       case TOP_CONFIG_PROGRESSIVE:
+               seq_puts(s, "\tProgressive");
+               break;
+       case TOP_CONFIG_INTER_TOP:
+               seq_puts(s, "\tInterlaced, top field");
+               break;
+       case TOP_CONFIG_INTER_BTM:
+               seq_puts(s, "\tInterlaced, bottom field");
+               break;
+       default:
+               seq_puts(s, "\t<UNKNOWN>");
+               break;
+       }
+       seq_printf(s, "\n\t %-20s 0x%08X", "MemFormat", c->top.mem_format);
+       seq_printf(s, "\n\t %-20s 0x%08X", "CurrentY", c->top.current_luma);
+       seq_printf(s, "\n\t %-20s 0x%08X", "CurrentC", c->top.current_chroma);
+       seq_printf(s, "\n\t %-20s 0x%08X", "YSrcPitch", c->top.luma_src_pitch);
+       seq_printf(s, "\n\t %-20s 0x%08X", "CSrcPitch",
+                  c->top.chroma_src_pitch);
+       seq_printf(s, "\n\t %-20s 0x%08X", "InputFrameSize",
+                  c->top.input_frame_size);
+       seq_printf(s, "\t%dx%d",
+                  c->top.input_frame_size & 0x0000FFFF,
+                  c->top.input_frame_size >> 16);
+       seq_printf(s, "\n\t %-20s 0x%08X", "InputViewportSize",
+                  c->top.input_viewport_size);
+       src_w = c->top.input_viewport_size & 0x0000FFFF;
+       src_h = c->top.input_viewport_size >> 16;
+       seq_printf(s, "\t%dx%d", src_w, src_h);
+       seq_puts(s, "\n\tHVSRC:");
+       seq_printf(s, "\n\t %-20s 0x%08X", "OutputPictureSize",
+                  c->hvsrc.output_picture_size);
+       dst_w = c->hvsrc.output_picture_size & 0x0000FFFF;
+       dst_h = c->hvsrc.output_picture_size >> 16;
+       seq_printf(s, "\t%dx%d", dst_w, dst_h);
+       seq_printf(s, "\n\t %-20s 0x%08X", "ParamCtrl", c->hvsrc.param_ctrl);
+       seq_printf(s, "\n\t %-20s %s", "yh_coef",
+                  hqvdp_dbg_get_lut(c->hvsrc.yh_coef));
+       seq_printf(s, "\n\t %-20s %s", "ch_coef",
+                  hqvdp_dbg_get_lut(c->hvsrc.ch_coef));
+       seq_printf(s, "\n\t %-20s %s", "yv_coef",
+                  hqvdp_dbg_get_lut(c->hvsrc.yv_coef));
+       seq_printf(s, "\n\t %-20s %s", "cv_coef",
+                  hqvdp_dbg_get_lut(c->hvsrc.cv_coef));
+       seq_printf(s, "\n\t %-20s", "ScaleH");
+       if (dst_w > src_w)
+               seq_printf(s, " %d/1", dst_w / src_w);
+       else
+               seq_printf(s, " 1/%d", src_w / dst_w);
+       seq_printf(s, "\n\t %-20s", "tScaleV");
+       if (dst_h > src_h)
+               seq_printf(s, " %d/1", dst_h / src_h);
+       else
+               seq_printf(s, " 1/%d", src_h / dst_h);
+       seq_puts(s, "\n\tCSDI:");
+       seq_printf(s, "\n\t %-20s 0x%08X\t", "Config", c->csdi.config);
+       switch (c->csdi.config) {
+       case CSDI_CONFIG_PROG:
+               seq_puts(s, "Bypass");
+               break;
+       case CSDI_CONFIG_INTER_DIR:
+               seq_puts(s, "Deinterlace, directional");
+               break;
+       default:
+               seq_puts(s, "<UNKNOWN>");
+               break;
+       }
+       seq_printf(s, "\n\t %-20s 0x%08X", "Config2", c->csdi.config2);
+       seq_printf(s, "\n\t %-20s 0x%08X", "DcdiConfig", c->csdi.dcdi_config);
+ }
+
+ static int hqvdp_dbg_show(struct seq_file *s, void *data)
+ {
+       struct drm_info_node *node = s->private;
+       struct sti_hqvdp *hqvdp = (struct sti_hqvdp *)node->info_ent->data;
+       struct drm_device *dev = node->minor->dev;
+       int cmd, cmd_offset, infoxp70;
+       void *virt;
+       int ret;
+
+       ret = mutex_lock_interruptible(&dev->struct_mutex);
+       if (ret)
+               return ret;
+       seq_printf(s, "%s: (vaddr = 0x%p)",
+                  sti_plane_to_str(&hqvdp->plane), hqvdp->regs);
+       DBGFS_DUMP(HQVDP_MBX_IRQ_TO_XP70);
+       DBGFS_DUMP(HQVDP_MBX_INFO_HOST);
+       DBGFS_DUMP(HQVDP_MBX_IRQ_TO_HOST);
+       DBGFS_DUMP(HQVDP_MBX_INFO_XP70);
+       infoxp70 = readl(hqvdp->regs + HQVDP_MBX_INFO_XP70);
+       seq_puts(s, "\tFirmware state: ");
+       if (infoxp70 & INFO_XP70_FW_READY)
+               seq_puts(s, "idle and ready");
+       else if (infoxp70 & INFO_XP70_FW_PROCESSING)
+               seq_puts(s, "processing a picture");
+       else if (infoxp70 & INFO_XP70_FW_INITQUEUES)
+               seq_puts(s, "programming queues");
+       else
+               seq_puts(s, "NOT READY");
+       DBGFS_DUMP(HQVDP_MBX_SW_RESET_CTRL);
+       DBGFS_DUMP(HQVDP_MBX_STARTUP_CTRL1);
+       if (readl(hqvdp->regs + HQVDP_MBX_STARTUP_CTRL1)
+                                       & STARTUP_CTRL1_RST_DONE)
+               seq_puts(s, "\tReset is done");
+       else
+               seq_puts(s, "\tReset is NOT done");
+       DBGFS_DUMP(HQVDP_MBX_STARTUP_CTRL2);
+       if (readl(hqvdp->regs + HQVDP_MBX_STARTUP_CTRL2)
+                                       & STARTUP_CTRL2_FETCH_EN)
+               seq_puts(s, "\tFetch is enabled");
+       else
+               seq_puts(s, "\tFetch is NOT enabled");
+       DBGFS_DUMP(HQVDP_MBX_GP_STATUS);
+       DBGFS_DUMP(HQVDP_MBX_NEXT_CMD);
+       DBGFS_DUMP(HQVDP_MBX_CURRENT_CMD);
+       DBGFS_DUMP(HQVDP_MBX_SOFT_VSYNC);
+       if (!(readl(hqvdp->regs + HQVDP_MBX_SOFT_VSYNC) & 3))
+               seq_puts(s, "\tHW Vsync");
+       else
+               seq_puts(s, "\tSW Vsync ?!?!");
+       /* Last command */
+       cmd = readl(hqvdp->regs + HQVDP_MBX_CURRENT_CMD);
+       cmd_offset = sti_hqvdp_get_curr_cmd(hqvdp);
+       if (cmd_offset == -1) {
+               seq_puts(s, "\n\n  Last command: unknown");
+       } else {
+               virt = hqvdp->hqvdp_cmd + cmd_offset;
+               seq_printf(s, "\n\n  Last command: address @ 0x%x (0x%p)",
+                          cmd, virt);
+               hqvdp_dbg_dump_cmd(s, (struct sti_hqvdp_cmd *)virt);
+       }
+       /* Next command */
+       cmd = readl(hqvdp->regs + HQVDP_MBX_NEXT_CMD);
+       cmd_offset = sti_hqvdp_get_next_cmd(hqvdp);
+       if (cmd_offset == -1) {
+               seq_puts(s, "\n\n  Next command: unknown");
+       } else {
+               virt = hqvdp->hqvdp_cmd + cmd_offset;
+               seq_printf(s, "\n\n  Next command address: @ 0x%x (0x%p)",
+                          cmd, virt);
+               hqvdp_dbg_dump_cmd(s, (struct sti_hqvdp_cmd *)virt);
+       }
+       seq_puts(s, "\n");
+       mutex_unlock(&dev->struct_mutex);
+       return 0;
+ }
+
+ static struct drm_info_list hqvdp_debugfs_files[] = {
+       { "hqvdp", hqvdp_dbg_show, 0, NULL },
+ };
+
+ static int hqvdp_debugfs_init(struct sti_hqvdp *hqvdp, struct drm_minor *minor)
+ {
+       unsigned int i;
+
+       for (i = 0; i < ARRAY_SIZE(hqvdp_debugfs_files); i++)
+               hqvdp_debugfs_files[i].data = hqvdp;
+
+       return drm_debugfs_create_files(hqvdp_debugfs_files,
+                                       ARRAY_SIZE(hqvdp_debugfs_files),
+                                       minor->debugfs_root, minor);
+ }
+
  /**
   * sti_hqvdp_update_hvsrc
   * @orient: horizontal or vertical
@@@ -580,7 -813,7 +813,7 @@@ int sti_hqvdp_vtg_cb(struct notifier_bl
                btm_cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
                top_cmd_offest = sti_hqvdp_get_curr_cmd(hqvdp);
                if ((btm_cmd_offset == -1) || (top_cmd_offest == -1)) {
-                       DRM_ERROR("Cannot get cmds, skip btm field\n");
+                       DRM_DEBUG_DRIVER("Warning: no cmd, will skip field\n");
                        return -EBUSY;
                }
  
                writel(hqvdp->hqvdp_cmd_paddr + btm_cmd_offset,
                                hqvdp->regs + HQVDP_MBX_NEXT_CMD);
  
-               hqvdp->curr_field_count++;
                hqvdp->btm_field_pending = false;
  
                dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
                                __func__, hqvdp->hqvdp_cmd_paddr);
+               sti_plane_update_fps(&hqvdp->plane, false, true);
        }
  
        return 0;
  static void sti_hqvdp_init(struct sti_hqvdp *hqvdp)
  {
        int size;
+       dma_addr_t dma_addr;
  
        hqvdp->vtg_nb.notifier_call = sti_hqvdp_vtg_cb;
  
        /* Allocate memory for the VDP commands */
        size = NB_VDP_CMD * sizeof(struct sti_hqvdp_cmd);
 -      hqvdp->hqvdp_cmd = dma_alloc_writecombine(hqvdp->dev, size,
 -                                       &dma_addr,
 -                                       GFP_KERNEL | GFP_DMA);
 +      hqvdp->hqvdp_cmd = dma_alloc_wc(hqvdp->dev, size,
-                                       &hqvdp->hqvdp_cmd_paddr,
++                                      &dma_addr,
 +                                      GFP_KERNEL | GFP_DMA);
        if (!hqvdp->hqvdp_cmd) {
                DRM_ERROR("Failed to allocate memory for VDP cmd\n");
                return;
        }
  
+       hqvdp->hqvdp_cmd_paddr = (u32)dma_addr;
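+       /*
+        * hqvdp_cmd_paddr is kept as u32 because the XP70 mailbox
+        * registers are 32 bits wide; the GFP_DMA allocation above is
+        * expected to keep the buffer within the coprocessor's reach.
+        */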
        memset(hqvdp->hqvdp_cmd, 0, size);
  }
  
@@@ -670,7 -906,7 +906,7 @@@ static void sti_hqvdp_start_xp70(struc
        DRM_DEBUG_DRIVER("\n");
  
        if (hqvdp->xp70_initialized) {
-               DRM_INFO("HQVDP XP70 already initialized\n");
+               DRM_DEBUG_DRIVER("HQVDP XP70 already initialized\n");
                return;
        }
  
        release_firmware(firmware);
  }
  
- static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
-                                   struct drm_plane_state *oldstate)
+ static int sti_hqvdp_atomic_check(struct drm_plane *drm_plane,
+                                 struct drm_plane_state *state)
  {
-       struct drm_plane_state *state = drm_plane->state;
        struct sti_plane *plane = to_sti_plane(drm_plane);
        struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
        struct drm_crtc *crtc = state->crtc;
-       struct sti_mixer *mixer = to_sti_mixer(crtc);
        struct drm_framebuffer *fb = state->fb;
-       struct drm_display_mode *mode = &crtc->mode;
-       int dst_x = state->crtc_x;
-       int dst_y = state->crtc_y;
-       int dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
-       int dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
-       /* src_x are in 16.16 format */
-       int src_x = state->src_x >> 16;
-       int src_y = state->src_y >> 16;
-       int src_w = state->src_w >> 16;
-       int src_h = state->src_h >> 16;
        bool first_prepare = plane->status == STI_PLANE_DISABLED ? true : false;
-       struct drm_gem_cma_object *cma_obj;
-       struct sti_hqvdp_cmd *cmd;
-       int scale_h, scale_v;
-       int cmd_offset;
+       struct drm_crtc_state *crtc_state;
+       struct drm_display_mode *mode;
+       int dst_x, dst_y, dst_w, dst_h;
+       int src_x, src_y, src_w, src_h;
+
+       /* no need for further checks if the plane is being disabled */
+       if (!crtc || !fb)
+               return 0;
+
+       crtc_state = drm_atomic_get_crtc_state(state->state, crtc);
+       mode = &crtc_state->mode;
+       dst_x = state->crtc_x;
+       dst_y = state->crtc_y;
+       dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+       dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+       /* src coordinates are 16.16 fixed point: >> 16 gives pixels */
+       src_x = state->src_x >> 16;
+       src_y = state->src_y >> 16;
+       src_w = state->src_w >> 16;
+       src_h = state->src_h >> 16;
+
+       if (!sti_hqvdp_check_hw_scaling(hqvdp, mode,
+                                       src_w, src_h,
+                                       dst_w, dst_h)) {
+               DRM_ERROR("Scaling beyond HW capabilities\n");
+               return -EINVAL;
+       }
+
+       if (!drm_fb_cma_get_gem_obj(fb, 0)) {
+               DRM_ERROR("Can't get CMA GEM object for fb\n");
+               return -EINVAL;
+       }
+
+       /*
+        * Input / output size
+        * Align to upper even value
+        */
+       dst_w = ALIGN(dst_w, 2);
+       dst_h = ALIGN(dst_h, 2);
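+       /* e.g. a 719x479 window is rounded up to 720x480 */
+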
+       if ((src_w > MAX_WIDTH) || (src_w < MIN_WIDTH) ||
+           (src_h > MAX_HEIGHT) || (src_h < MIN_HEIGHT) ||
+           (dst_w > MAX_WIDTH) || (dst_w < MIN_WIDTH) ||
+           (dst_h > MAX_HEIGHT) || (dst_h < MIN_HEIGHT)) {
+               DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
+                         src_w, src_h,
+                         dst_w, dst_h);
+               return -EINVAL;
+       }
+
+       if (first_prepare) {
+               /* Start HQVDP XP70 coprocessor */
+               sti_hqvdp_start_xp70(hqvdp);
+
+               /* Prevent VTG shutdown */
+               if (clk_prepare_enable(hqvdp->clk_pix_main)) {
+                       DRM_ERROR("Failed to prepare/enable pix main clk\n");
+                       return -EINVAL;
+               }
+
+               /* Register VTG Vsync callback to handle bottom fields */
+               if (sti_vtg_register_client(hqvdp->vtg,
+                                           &hqvdp->vtg_nb,
+                                           crtc)) {
+                       DRM_ERROR("Cannot register VTG notifier\n");
+                       return -EINVAL;
+               }
+       }
  
        DRM_DEBUG_KMS("CRTC:%d (%s) drm plane:%d (%s)\n",
-                     crtc->base.id, sti_mixer_to_str(mixer),
+                     crtc->base.id, sti_mixer_to_str(to_sti_mixer(crtc)),
                      drm_plane->base.id, sti_plane_to_str(plane));
        DRM_DEBUG_KMS("%s dst=(%dx%d)@(%d,%d) - src=(%dx%d)@(%d,%d)\n",
                      sti_plane_to_str(plane),
                      dst_w, dst_h, dst_x, dst_y,
                      src_w, src_h, src_x, src_y);
  
+       return 0;
+ }
+
+ static void sti_hqvdp_atomic_update(struct drm_plane *drm_plane,
+                                   struct drm_plane_state *oldstate)
+ {
+       struct drm_plane_state *state = drm_plane->state;
+       struct sti_plane *plane = to_sti_plane(drm_plane);
+       struct sti_hqvdp *hqvdp = to_sti_hqvdp(plane);
+       struct drm_crtc *crtc = state->crtc;
+       struct drm_framebuffer *fb = state->fb;
+       struct drm_display_mode *mode;
+       int dst_x, dst_y, dst_w, dst_h;
+       int src_x, src_y, src_w, src_h;
+       struct drm_gem_cma_object *cma_obj;
+       struct sti_hqvdp_cmd *cmd;
+       int scale_h, scale_v;
+       int cmd_offset;
+
+       if (!crtc || !fb)
+               return;
+
+       mode = &crtc->mode;
+       dst_x = state->crtc_x;
+       dst_y = state->crtc_y;
+       dst_w = clamp_val(state->crtc_w, 0, mode->crtc_hdisplay - dst_x);
+       dst_h = clamp_val(state->crtc_h, 0, mode->crtc_vdisplay - dst_y);
+       /* src coordinates are 16.16 fixed point: >> 16 gives pixels */
+       src_x = state->src_x >> 16;
+       src_y = state->src_y >> 16;
+       src_w = state->src_w >> 16;
+       src_h = state->src_h >> 16;
+
        cmd_offset = sti_hqvdp_get_free_cmd(hqvdp);
        if (cmd_offset == -1) {
-               DRM_ERROR("No available hqvdp_cmd now\n");
+               DRM_DEBUG_DRIVER("Warning: no cmd, will skip frame\n");
                return;
        }
        cmd = hqvdp->hqvdp_cmd + cmd_offset;
  
-       if (!sti_hqvdp_check_hw_scaling(hqvdp, mode,
-                                       src_w, src_h,
-                                       dst_w, dst_h)) {
-               DRM_ERROR("Scaling beyond HW capabilities\n");
-               return;
-       }
        /* Static parameters, defaulting to progressive mode */
        cmd->top.config = TOP_CONFIG_PROGRESSIVE;
        cmd->top.mem_format = TOP_MEM_FORMAT_DFLT;
        cmd->iqi.pxf_conf = IQI_PXF_CONF_DFLT;
  
        cma_obj = drm_fb_cma_get_gem_obj(fb, 0);
-       if (!cma_obj) {
-               DRM_ERROR("Can't get CMA GEM object for fb\n");
-               return;
-       }
  
        DRM_DEBUG_DRIVER("drm FB:%d format:%.4s phys@:0x%lx\n", fb->base.id,
                         (char *)&fb->pixel_format,
        dst_w = ALIGN(dst_w, 2);
        dst_h = ALIGN(dst_h, 2);
  
-       if ((src_w > MAX_WIDTH) || (src_w < MIN_WIDTH) ||
-           (src_h > MAX_HEIGHT) || (src_h < MIN_HEIGHT) ||
-           (dst_w > MAX_WIDTH) || (dst_w < MIN_WIDTH) ||
-           (dst_h > MAX_HEIGHT) || (dst_h < MIN_HEIGHT)) {
-               DRM_ERROR("Invalid in/out size %dx%d -> %dx%d\n",
-                         src_w, src_h,
-                         dst_w, dst_h);
-               return;
-       }
        cmd->top.input_viewport_size = src_h << 16 | src_w;
        cmd->top.input_frame_size = src_h << 16 | src_w;
        cmd->hvsrc.output_picture_size = dst_h << 16 | dst_w;
        scale_v = SCALE_FACTOR * dst_h / src_h;
        sti_hqvdp_update_hvsrc(HVSRC_VERT, scale_v, &cmd->hvsrc);
  
-       if (first_prepare) {
-               /* Start HQVDP XP70 coprocessor */
-               sti_hqvdp_start_xp70(hqvdp);
-               /* Prevent VTG shutdown */
-               if (clk_prepare_enable(hqvdp->clk_pix_main)) {
-                       DRM_ERROR("Failed to prepare/enable pix main clk\n");
-                       return;
-               }
-               /* Register VTG Vsync callback to handle bottom fields */
-               if (sti_vtg_register_client(hqvdp->vtg,
-                                           &hqvdp->vtg_nb,
-                                           crtc)) {
-                       DRM_ERROR("Cannot register VTG notifier\n");
-                       return;
-               }
-       }
        writel(hqvdp->hqvdp_cmd_paddr + cmd_offset,
               hqvdp->regs + HQVDP_MBX_NEXT_CMD);
  
-       hqvdp->curr_field_count++;
        /* Interlaced : get ready to display the bottom field at next Vsync */
        if (fb->flags & DRM_MODE_FB_INTERLACED)
                hqvdp->btm_field_pending = true;
        dev_dbg(hqvdp->dev, "%s Posted command:0x%x\n",
                __func__, hqvdp->hqvdp_cmd_paddr + cmd_offset);
  
+       sti_plane_update_fps(plane, true, true);
        plane->status = STI_PLANE_UPDATED;
  }
  
@@@ -938,7 -1219,6 +1219,6 @@@ static void sti_hqvdp_atomic_disable(st
                                     struct drm_plane_state *oldstate)
  {
        struct sti_plane *plane = to_sti_plane(drm_plane);
-       struct sti_mixer *mixer = to_sti_mixer(drm_plane->crtc);
  
        if (!drm_plane->crtc) {
                DRM_DEBUG_DRIVER("drm plane:%d not enabled\n",
        }
  
        DRM_DEBUG_DRIVER("CRTC:%d (%s) drm plane:%d (%s)\n",
-                        drm_plane->crtc->base.id, sti_mixer_to_str(mixer),
+                        drm_plane->crtc->base.id,
+                        sti_mixer_to_str(to_sti_mixer(drm_plane->crtc)),
                         drm_plane->base.id, sti_plane_to_str(plane));
  
        plane->status = STI_PLANE_DISABLING;
  }
  
  static const struct drm_plane_helper_funcs sti_hqvdp_helpers_funcs = {
+       .atomic_check = sti_hqvdp_atomic_check,
        .atomic_update = sti_hqvdp_atomic_update,
        .atomic_disable = sti_hqvdp_atomic_disable,
  };
@@@ -983,6 -1265,9 +1265,9 @@@ static struct drm_plane *sti_hqvdp_crea
  
        sti_plane_init_property(&hqvdp->plane, DRM_PLANE_TYPE_OVERLAY);
  
+       if (hqvdp_debugfs_init(hqvdp, drm_dev->primary))
+               DRM_ERROR("HQVDP debugfs setup failed\n");
        return &hqvdp->plane.drm_plane;
  }
  
index 034ef2de903769f521c50c6a6936a5099e69578f,ac8eafea63611b831d5c05ce637c3a84e7e09665..9807bc9d296e33cb3c159d95ae09af778a21b71c
@@@ -398,8 -398,9 +398,8 @@@ int vc4_mmap(struct file *filp, struct 
        vma->vm_flags &= ~VM_PFNMAP;
        vma->vm_pgoff = 0;
  
 -      ret = dma_mmap_writecombine(bo->base.base.dev->dev, vma,
 -                                  bo->base.vaddr, bo->base.paddr,
 -                                  vma->vm_end - vma->vm_start);
 +      ret = dma_mmap_wc(bo->base.base.dev->dev, vma, bo->base.vaddr,
 +                        bo->base.paddr, vma->vm_end - vma->vm_start);
        if (ret)
                drm_gem_vm_close(vma);
  
@@@ -498,11 -499,12 +498,12 @@@ vc4_create_shader_bo_ioctl(struct drm_d
        if (IS_ERR(bo))
                return PTR_ERR(bo);
  
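+       /*
+        * copy_from_user() returns the number of bytes left uncopied
+        * rather than an errno, so a short copy must be turned into
+        * -EFAULT instead of being returned as-is.
+        */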
-       ret = copy_from_user(bo->base.vaddr,
+       if (copy_from_user(bo->base.vaddr,
                             (void __user *)(uintptr_t)args->data,
-                            args->size);
-       if (ret != 0)
+                            args->size)) {
+               ret = -EFAULT;
                goto fail;
+       }
        /* Clear the rest of the memory left over from allocating out of
         * the BO cache.
         */
diff --combined drivers/gpu/host1x/job.c
index defa7995f2131a06ddc88e09d13d914f8a244ea1,1919aab88c3f412a10fa22fd5d85557f0e38a9a0..b4515d54403967dad522594c19dd2052f9c875c0
@@@ -225,7 -225,7 +225,7 @@@ unpin
        return 0;
  }
  
- static unsigned int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
+ static int do_relocs(struct host1x_job *job, struct host1x_bo *cmdbuf)
  {
        int i = 0;
        u32 last_page = ~0;
@@@ -467,8 -467,9 +467,8 @@@ static inline int copy_gathers(struct h
                size += g->words * sizeof(u32);
        }
  
 -      job->gather_copy_mapped = dma_alloc_writecombine(dev, size,
 -                                                       &job->gather_copy,
 -                                                       GFP_KERNEL);
 +      job->gather_copy_mapped = dma_alloc_wc(dev, size, &job->gather_copy,
 +                                             GFP_KERNEL);
        if (!job->gather_copy_mapped) {
                job->gather_copy_mapped = NULL;
                return -ENOMEM;
@@@ -577,8 -578,9 +577,8 @@@ void host1x_job_unpin(struct host1x_jo
        job->num_unpins = 0;
  
        if (job->gather_copy_size)
 -              dma_free_writecombine(job->channel->dev, job->gather_copy_size,
 -                                    job->gather_copy_mapped,
 -                                    job->gather_copy);
 +              dma_free_wc(job->channel->dev, job->gather_copy_size,
 +                          job->gather_copy_mapped, job->gather_copy);
  }
  EXPORT_SYMBOL(host1x_job_unpin);
  
index 1c872bdfddf600b322165389690b99646473e67d,0754a37c967495fdafdf5bee4bd67d505c347dda..bceb81309787c5638a40b4f15e043bcf29437647
@@@ -251,10 -251,8 +251,10 @@@ static struct ion_buffer *ion_buffer_cr
         * memory coming from the heaps is ready for DMA, i.e. if it has a
         * cached mapping, that mapping has been invalidated
         */
 -      for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i)
 +      for_each_sg(buffer->sg_table->sgl, sg, buffer->sg_table->nents, i) {
                sg_dma_address(sg) = sg_phys(sg);
 +              sg_dma_len(sg) = sg->length;
 +      }
        mutex_lock(&dev->buffer_lock);
        ion_buffer_add(dev, buffer);
        mutex_unlock(&dev->buffer_lock);
@@@ -387,22 -385,13 +387,22 @@@ static void ion_handle_get(struct ion_h
        kref_get(&handle->ref);
  }
  
 -static int ion_handle_put(struct ion_handle *handle)
 +static int ion_handle_put_nolock(struct ion_handle *handle)
 +{
 +      int ret;
 +
 +      ret = kref_put(&handle->ref, ion_handle_destroy);
 +
 +      return ret;
 +}
 +
 +int ion_handle_put(struct ion_handle *handle)
  {
        struct ion_client *client = handle->client;
        int ret;
  
        mutex_lock(&client->lock);
 -      ret = kref_put(&handle->ref, ion_handle_destroy);
 +      ret = ion_handle_put_nolock(handle);
        mutex_unlock(&client->lock);
  
        return ret;
@@@ -426,30 -415,20 +426,30 @@@ static struct ion_handle *ion_handle_lo
        return ERR_PTR(-EINVAL);
  }
  
 -static struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
 +static struct ion_handle *ion_handle_get_by_id_nolock(struct ion_client *client,
                                                int id)
  {
        struct ion_handle *handle;
  
 -      mutex_lock(&client->lock);
        handle = idr_find(&client->idr, id);
        if (handle)
                ion_handle_get(handle);
 -      mutex_unlock(&client->lock);
  
        return handle ? handle : ERR_PTR(-EINVAL);
  }
  
 +struct ion_handle *ion_handle_get_by_id(struct ion_client *client,
 +                                              int id)
 +{
 +      struct ion_handle *handle;
 +
 +      mutex_lock(&client->lock);
 +      handle = ion_handle_get_by_id_nolock(client, id);
 +      mutex_unlock(&client->lock);
 +
 +      return handle;
 +}
 +
  static bool ion_handle_validate(struct ion_client *client,
                                struct ion_handle *handle)
  {
@@@ -551,28 -530,22 +551,28 @@@ struct ion_handle *ion_alloc(struct ion
  }
  EXPORT_SYMBOL(ion_alloc);
  
 -void ion_free(struct ion_client *client, struct ion_handle *handle)
 +static void ion_free_nolock(struct ion_client *client, struct ion_handle *handle)
  {
        bool valid_handle;
  
        BUG_ON(client != handle->client);
  
 -      mutex_lock(&client->lock);
        valid_handle = ion_handle_validate(client, handle);
  
        if (!valid_handle) {
                WARN(1, "%s: invalid handle passed to free.\n", __func__);
 -              mutex_unlock(&client->lock);
                return;
        }
 +      ion_handle_put_nolock(handle);
 +}
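 +
 +/*
 + * The *_nolock variants assume client->lock is already held by the
 + * caller; this lets ION_IOC_FREE look up, free and drop a handle in a
 + * single critical section instead of re-taking the lock at each step.
 + */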
 +
 +void ion_free(struct ion_client *client, struct ion_handle *handle)
 +{
 +      BUG_ON(client != handle->client);
 +
 +      mutex_lock(&client->lock);
 +      ion_free_nolock(client, handle);
        mutex_unlock(&client->lock);
 -      ion_handle_put(handle);
  }
  EXPORT_SYMBOL(ion_free);
  
@@@ -702,34 -675,6 +702,34 @@@ void ion_unmap_kernel(struct ion_clien
  }
  EXPORT_SYMBOL(ion_unmap_kernel);
  
 +static struct mutex debugfs_mutex;
 +static struct rb_root *ion_root_client;
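 +
 +/* The clients rbtree is ordered on the client pointer itself, so a
 + * simple pointer-comparison walk is enough to tell whether @client is
 + * still registered.
 + */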
 +static int is_client_alive(struct ion_client *client)
 +{
 +      struct rb_node *node;
 +      struct ion_client *tmp;
 +      struct ion_device *dev;
 +
 +      node = ion_root_client->rb_node;
 +      dev = container_of(ion_root_client, struct ion_device, clients);
 +
 +      down_read(&dev->lock);
 +      while (node) {
 +              tmp = rb_entry(node, struct ion_client, node);
 +              if (client < tmp) {
 +                      node = node->rb_left;
 +              } else if (client > tmp) {
 +                      node = node->rb_right;
 +              } else {
 +                      up_read(&dev->lock);
 +                      return 1;
 +              }
 +      }
 +
 +      up_read(&dev->lock);
 +      return 0;
 +}
 +
  static int ion_debug_client_show(struct seq_file *s, void *unused)
  {
        struct ion_client *client = s->private;
        const char *names[ION_NUM_HEAP_IDS] = {NULL};
        int i;
  
 +      mutex_lock(&debugfs_mutex);
 +      if (!is_client_alive(client)) {
 +              seq_printf(s, "ion_client 0x%p dead, can't dump its buffers\n",
 +                         client);
 +              mutex_unlock(&debugfs_mutex);
 +              return 0;
 +      }
 +
        mutex_lock(&client->lock);
        for (n = rb_first(&client->handles); n; n = rb_next(n)) {
                struct ion_handle *handle = rb_entry(n, struct ion_handle,
                sizes[id] += handle->buffer->size;
        }
        mutex_unlock(&client->lock);
 +      mutex_unlock(&debugfs_mutex);
  
        seq_printf(s, "%16.16s: %16.16s\n", "heap_name", "size_in_bytes");
        for (i = 0; i < ION_NUM_HEAP_IDS; i++) {
@@@ -894,7 -830,6 +894,7 @@@ void ion_client_destroy(struct ion_clie
        struct rb_node *n;
  
        pr_debug("%s: %d\n", __func__, __LINE__);
 +      mutex_lock(&debugfs_mutex);
        while ((n = rb_first(&client->handles))) {
                struct ion_handle *handle = rb_entry(n, struct ion_handle,
                                                     node);
        kfree(client->display_name);
        kfree(client->name);
        kfree(client);
 +      mutex_unlock(&debugfs_mutex);
  }
  EXPORT_SYMBOL(ion_client_destroy);
  
@@@ -1123,8 -1057,7 +1123,7 @@@ static void ion_dma_buf_kunmap(struct d
  {
  }
  
- static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
-                                       size_t len,
+ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                                        enum dma_data_direction direction)
  {
        struct ion_buffer *buffer = dmabuf->priv;
        return PTR_ERR_OR_ZERO(vaddr);
  }
  
- static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
-                                      size_t len,
+ static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
                                       enum dma_data_direction direction)
  {
        struct ion_buffer *buffer = dmabuf->priv;
@@@ -1217,18 -1149,22 +1215,18 @@@ int ion_share_dma_buf_fd(struct ion_cli
  }
  EXPORT_SYMBOL(ion_share_dma_buf_fd);
  
 -struct ion_handle *ion_import_dma_buf(struct ion_client *client, int fd)
 +struct ion_handle *ion_import_dma_buf(struct ion_client *client,
 +                                    struct dma_buf *dmabuf)
  {
 -      struct dma_buf *dmabuf;
        struct ion_buffer *buffer;
        struct ion_handle *handle;
        int ret;
  
 -      dmabuf = dma_buf_get(fd);
 -      if (IS_ERR(dmabuf))
 -              return ERR_CAST(dmabuf);
        /* if this memory came from ion */
  
        if (dmabuf->ops != &dma_buf_ops) {
                pr_err("%s: can not import dmabuf from another exporter\n",
                       __func__);
 -              dma_buf_put(dmabuf);
                return ERR_PTR(-EINVAL);
        }
        buffer = dmabuf->priv;
        }
  
  end:
 -      dma_buf_put(dmabuf);
        return handle;
  }
  EXPORT_SYMBOL(ion_import_dma_buf);
  
 +struct ion_handle *ion_import_dma_buf_fd(struct ion_client *client, int fd)
 +{
 +      struct dma_buf *dmabuf;
 +      struct ion_handle *handle;
 +
 +      dmabuf = dma_buf_get(fd);
 +      if (IS_ERR(dmabuf))
 +              return ERR_CAST(dmabuf);
 +
 +      handle = ion_import_dma_buf(client, dmabuf);
 +      dma_buf_put(dmabuf);
 +      return handle;
 +}
 +EXPORT_SYMBOL(ion_import_dma_buf_fd);
 +
  static int ion_sync_for_device(struct ion_client *client, int fd)
  {
        struct dma_buf *dmabuf;
@@@ -1357,15 -1279,11 +1355,15 @@@ static long ion_ioctl(struct file *filp
        {
                struct ion_handle *handle;
  
 -              handle = ion_handle_get_by_id(client, data.handle.handle);
 -              if (IS_ERR(handle))
 +              mutex_lock(&client->lock);
 +              handle = ion_handle_get_by_id_nolock(client, data.handle.handle);
 +              if (IS_ERR(handle)) {
 +                      mutex_unlock(&client->lock);
                        return PTR_ERR(handle);
 -              ion_free(client, handle);
 -              ion_handle_put(handle);
 +              }
 +              ion_free_nolock(client, handle);
 +              ion_handle_put_nolock(handle);
 +              mutex_unlock(&client->lock);
                break;
        }
        case ION_IOC_SHARE:
        {
                struct ion_handle *handle;
  
 -              handle = ion_import_dma_buf(client, data.fd.fd);
 +              handle = ion_import_dma_buf_fd(client, data.fd.fd);
                if (IS_ERR(handle))
                        ret = PTR_ERR(handle);
                else
@@@ -1483,7 -1401,6 +1481,7 @@@ static int ion_debug_heap_show(struct s
        seq_printf(s, "%16s %16s %16s\n", "client", "pid", "size");
        seq_puts(s, "----------------------------------------------------\n");
  
 +      mutex_lock(&debugfs_mutex);
        for (n = rb_first(&dev->clients); n; n = rb_next(n)) {
                struct ion_client *client = rb_entry(n, struct ion_client,
                                                     node);
                                   client->pid, size);
                }
        }
 +      mutex_unlock(&debugfs_mutex);
 +
        seq_puts(s, "----------------------------------------------------\n");
        seq_puts(s, "orphaned allocations (info is from last known client):\n");
        mutex_lock(&dev->buffer_lock);
@@@ -1555,7 -1470,7 +1553,7 @@@ static int debug_shrink_set(void *data
        struct shrink_control sc;
        int objs;
  
 -      sc.gfp_mask = -1;
 +      sc.gfp_mask = GFP_HIGHUSER;
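 +      /* -1 would set every GFP bit and is not a valid mask;
 +       * GFP_HIGHUSER is a real one for this debugfs-triggered shrink. */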
        sc.nr_to_scan = val;
  
        if (!val) {
@@@ -1573,7 -1488,7 +1571,7 @@@ static int debug_shrink_get(void *data
        struct shrink_control sc;
        int objs;
  
 -      sc.gfp_mask = -1;
 +      sc.gfp_mask = GFP_HIGHUSER;
        sc.nr_to_scan = 0;
  
        objs = heap->shrinker.count_objects(&heap->shrinker, &sc);
@@@ -1688,8 -1603,6 +1686,8 @@@ debugfs_done
        init_rwsem(&idev->lock);
        plist_head_init(&idev->heaps);
        idev->clients = RB_ROOT;
 +      ion_root_client = &idev->clients;
 +      mutex_init(&debugfs_mutex);
        return idev;
  }
  EXPORT_SYMBOL(ion_device_create);
diff --combined mm/swapfile.c
index b86cf26a586b6a6e71b6dfb79e0d1c2442c5e77a,38884383ebac5d8d55dc14e49f10d190a4dee37a..560ad380634c19661f5606d0397ac85aa62c8add
@@@ -48,6 -48,12 +48,12 @@@ static sector_t map_swap_entry(swp_entr
  DEFINE_SPINLOCK(swap_lock);
  static unsigned int nr_swapfiles;
  atomic_long_t nr_swap_pages;
+ /*
+  * Some modules use swappable objects and may try to swap them out under
+  * memory pressure (via the shrinker). Before doing so, they may wish to
+  * check to see if any swap space is available.
+  */
+ EXPORT_SYMBOL_GPL(nr_swap_pages);
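+ /*
+  * Hypothetical caller sketch: a module's shrinker can now bail out
+  * early when swap is exhausted, e.g.:
+  *
+  *	if (get_nr_swap_pages() <= 0)
+  *		return SHRINK_STOP;
+  */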
  /* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
  long total_swap_pages;
  static int least_priority;
@@@ -2526,7 -2532,8 +2532,7 @@@ SYSCALL_DEFINE2(swapon, const char __us
                  (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
        enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map);
  
 -      pr_info("Adding %uk swap on %s.  "
 -                      "Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
 +      pr_info("Adding %uk swap on %s.  Priority:%d extents:%d across:%lluk %s%s%s%s%s\n",
                p->pages<<(PAGE_SHIFT-10), name->name, p->prio,
                nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
                (p->flags & SWP_SOLIDSTATE) ? "SS" : "",