Merge tag 'drm-next-2019-05-09' of git://anongit.freedesktop.org/drm/drm
author		Linus Torvalds <torvalds@linux-foundation.org>
		Thu, 9 May 2019 04:35:19 +0000 (21:35 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
		Thu, 9 May 2019 04:35:19 +0000 (21:35 -0700)
Pull drm updates from Dave Airlie:
 "This has two exciting community drivers for ARM Mali accelerators.
  Since ARM has never been open source friendly on the GPU side of the
  house, the community has had to create open source drivers for the
  Mali GPUs. Lima covers the older t4xx and panfrost the newer 6xx/7xx
  series. Well done to all involved and hopefully this will help ARM
  head in the right direction.

  There is also now the ability, if you don't have any of the legacy
  (pre-KMS) drivers enabled, to remove all the pre-KMS support code from
  the core drm; this saves 10% or so in code size on my machine.
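
  (As a rough sketch, assuming the gate is the existing CONFIG_DRM_LEGACY
  Kconfig symbol that the remaining pre-KMS drivers depend on, a kernel
  config that compiles the legacy support out of drm.ko would look like:

      CONFIG_DRM=m
      # CONFIG_DRM_LEGACY is not set

  i.e. DRM built with none of the legacy UMS-era drivers enabled.)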

  i915 also enables Icelake/Elkhart Lake Gen11 GPUs by default, and
  vboxvideo moves out of staging.

  There are also some rcar-du patches which cross over with the media
  tree, but all should be acked by Mauro.

  Summary:

  uapi changes:
   - Colorspace connector property
   - fourcc - new YUV formats
   - timeline sync objects initially merged
   - expose FB_DAMAGE_CLIPS to atomic userspace
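
   As a hedged illustration of the last item (using the standard libdrm
   atomic API; the plane id, the property id lookup and the damage
   rectangles are assumptions for the example), userspace attaches
   per-frame damage roughly like this:

       /* struct drm_mode_rect clips[N] filled with dirty rectangles */
       uint32_t blob_id;
       drmModeCreatePropertyBlob(fd, clips, sizeof(clips), &blob_id);
       /* fb_damage_prop: id of the plane's "FB_DAMAGE_CLIPS" property,
        * found via drmModeObjectGetProperties(fd, plane_id,
        * DRM_MODE_OBJECT_PLANE) */
       drmModeAtomicAddProperty(req, plane_id, fb_damage_prop, blob_id);
       drmModeAtomicCommit(fd, req, DRM_MODE_ATOMIC_NONBLOCK, NULL);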

  new drivers:
   - vboxvideo: moved out of staging
   - aspeed: ASPEED SoC BMC chip display support
   - lima: ARM Mali4xx GPU acceleration driver support
   - panfrost: ARM Mali 6xx/7xx Midgard/Bifrost acceleration driver support

  core:
   - component helper docs
   - unplugging fixes
   - devm device init
   - MIPI/DSI rate control
   - shmem backed gem objects
   - connector, display_info, edid_quirks cleanups
   - dma_buf fence chain support
   - 64-bit dma-fence seqno comparison fixes (see the note after this list)
   - move initial fb config code to core
   - gem fence array helpers for Lima
   - ability to remove legacy support code if no drivers require it (removes 10% of drm.ko size)
   - lease fixes
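
   For the seqno fix above, the usual wrap-safe pattern (a sketch of the
   generic technique, not necessarily the exact kernel helper) compares
   64-bit seqnos via signed subtraction:

       static inline bool seqno_is_later(uint64_t a, uint64_t b)
       {
               /* true if a is after b; robust against wraparound as
                * long as the two are less than 2^63 apart */
               return (int64_t)(a - b) > 0;
       }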

  ttm:
   - unified DRM_FILE_PAGE_OFFSET handling
   - Account for kernel allocations in kernel zone only

  panel:
   - OSD070T1718-19TS panel support
   - panel-tpo-td028ttec1 backlight support
   - Ronbo RB070D30 MIPI/DSI
   - Feiyang FY07024DI26A30-D MIPI-DSI panel
   - Rocktech jh057n00900 MIPI-DSI panel

  i915:
   - Comet Lake (Gen9) PCI IDs
   - Updated Icelake PCI IDs
   - Elkhart Lake (Gen11) support
   - DP MST property additions
   - plane and watermark fixes
   - Icelake port sync and VEBOX disable fixes
   - struct_mutex usage reduction
   - Icelake gamma fix
   - GuC reset fixes
   - make mmap more asynchronous
   - sound display power well race fixes
   - DDI/MIPI-DSI clocks for Icelake
   - Icelake RPS frequency changing support
   - Icelake workarounds

  amdgpu:
   - Use HMM for userptr
   - vega20 experimental smu11 support
   - RAS support for vega20
   - BACO support for vega12 + fixes for vega20
   - reworked IH interrupt handling
   - amdkfd RAS support
   - Freesync improvements
   - initial timeline sync object support
   - DC Z ordering fixes
   - NV12 planes support
   - colorspace properties for planes
   - eDP optimizations when eDP is already initialized

  nouveau:
   - misc fixes

  etnaviv:
   - misc fixes

  msm:
   - GPU zap shader support expansion
   - robustness ABI addition

  exynos:
   - Logging cleanups

  tegra:
   - Shared reset fix
   - CPU cache maintenance fix

  cirrus:
   - driver rewritten using simple helpers

  meson:
   - G12A support

  vmwgfx:
   - Resource dirtying management improvements
   - Userspace logging improvements

  virtio:
   - PRIME fixes

  rockchip:
   - rk3066 hdmi support

  sun4i:
   - DSI burst mode support

  vc4:
   - load tracker to detect underflow

  v3d:
   - v3d v4.2 support

  malidp:
   - initial Mali D71 support in komeda driver

  tfp410:
   - omap-related improvements

  omapdrm:
   - drm bridge/panel support
   - drop some omap specific panels

  rcar-du:
   - Display writeback support"

* tag 'drm-next-2019-05-09' of git://anongit.freedesktop.org/drm/drm: (1507 commits)
  drm/msm/a6xx: No zap shader is not an error
  drm/cma-helper: Fix drm_gem_cma_free_object()
  drm: Fix timestamp docs for variable refresh properties.
  drm/komeda: Mark the local functions as static
  drm/komeda: Fixed warning: Function parameter or member not described
  drm/komeda: Expose bus_width to Komeda-CORE
  drm/komeda: Add sysfs attribute: core_id and config_id
  drm: add non-desktop quirk for Valve HMDs
  drm/panfrost: Show stored feature registers
  drm/panfrost: Don't scream about deferred probe
  drm/panfrost: Disable PM on probe failure
  drm/panfrost: Set DMA masks earlier
  drm/panfrost: Add sanity checks to submit IOCTL
  drm/etnaviv: initialize idle mask before querying the HW db
  drm: introduce a capability flag for syncobj timeline support
  drm: report consistent errors when checking syncobj capibility
  drm/nouveau/nouveau: forward error generated while resuming objects tree
  drm/nouveau/fb/ramgk104: fix spelling mistake "sucessfully" -> "successfully"
  drm/nouveau/i2c: Disable i2c bus access after ->fini()
  drm/nouveau: Remove duplicate ACPI_VIDEO_NOTIFY_PROBE definition
  ...

29 files changed:
Documentation/devicetree/bindings/vendor-prefixes.txt
MAINTAINERS
drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c
drivers/gpu/drm/amd/display/dc/core/dc.c
drivers/gpu/drm/amd/display/dc/dc.h
drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
drivers/gpu/drm/drm_syncobj.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_vma.c
drivers/gpu/drm/i915/intel_hdmi.c
drivers/gpu/drm/i915/intel_runtime_pm.c
drivers/gpu/drm/imx/ipuv3-crtc.c
drivers/gpu/drm/ttm/ttm_bo.c
drivers/gpu/drm/ttm/ttm_memory.c
drivers/gpu/drm/vboxvideo/Kconfig
drivers/gpu/drm/vboxvideo/vbox_mode.c
drivers/gpu/drm/vboxvideo/vbox_prime.c
drivers/gpu/drm/vc4/vc4_crtc.c
drivers/gpu/drm/virtio/virtgpu_drv.c
drivers/gpu/drm/virtio/virtgpu_drv.h
drivers/gpu/drm/virtio/virtgpu_prime.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/staging/Kconfig
drivers/staging/Makefile
drivers/usb/dwc3/dwc3-of-simple.c
include/drm/ttm/ttm_bo_driver.h

index 686771d056c7e5969683813b22335b704b19be06,9506140167d67c6457e191ff99f2226f20b14544..9ed399977297b12a4b44622739b003f57997e0b1
@@@ -36,7 -36,6 +36,7 @@@ aptina        Aptina Imagin
  arasan        Arasan Chip Systems
  archermind ArcherMind Technology (Nanjing) Co., Ltd.
  arctic        Arctic Sand
 +arcx  arcx Inc. / Archronix Inc.
  aries Aries Embedded GmbH
  arm   ARM Ltd.
  armadeus      ARMadeus Systems SARL
@@@ -211,7 -210,6 +211,7 @@@ kiebackpeter    Kieback & Peter Gmb
  kinetic Kinetic Technologies
  kingdisplay   King & Display Technology Co., Ltd.
  kingnovel     Kingnovel Technology Co., Ltd.
 +kionix        Kionix, Inc.
  koe   Kaohsiung Opto-Electronics Inc.
  kosagi        Sutajio Ko-Usagi PTE Ltd.
  kyo   Kyocera Corporation
@@@ -235,7 -233,6 +235,7 @@@ lsi        LSI Corp. (LSI Logic
  lwn   Liebherr-Werk Nenzing GmbH
  macnica       Macnica Americas
  marvell       Marvell Technology Group Ltd.
 +maxbotix      MaxBotix Inc.
  maxim Maxim Integrated Products
  mbvl  Mobiveil Inc.
  mcube mCube
@@@ -305,6 -302,7 +305,7 @@@ oranth     Shenzhen Oranth Technology Co., 
  ORCL  Oracle Corporation
  orisetech     Orise Technology
  ortustech     Ortus Technology Co., Ltd.
+ osddisplays   OSD Displays
  ovti  OmniVision Technologies
  oxsemi        Oxford Semiconductor, Ltd.
  panasonic     Panasonic Corporation
@@@ -347,7 -345,9 +348,9 @@@ ricoh      Ricoh Co. Ltd
  rikomagic     Rikomagic Tech Corp. Ltd
  riscv RISC-V Foundation
  rockchip      Fuzhou Rockchip Electronics Co., Ltd
+ rocktech      ROCKTECH DISPLAYS LIMITED
  rohm  ROHM Semiconductor Co., Ltd
+ ronbo   Ronbo Electronics
  roofull       Shenzhen Roofull Technology Co, Ltd
  samsung       Samsung Semiconductor
  samtec        Samtec/Softing company
diff --combined MAINTAINERS
index 4244dd341eb779521a745e250e1ca0fd7253a0b7,e233b3c485460d002204394e2070c383d1baaa5c..cdd21faa0d0f9eda53497b4811c2c8b6294acf7d
@@@ -268,13 -268,12 +268,13 @@@ L:      linux-gpio@vger.kernel.or
  S:    Maintained
  F:    drivers/gpio/gpio-104-idio-16.c
  
 -ACCES 104-QUAD-8 IIO DRIVER
 +ACCES 104-QUAD-8 DRIVER
  M:    William Breathitt Gray <vilhelm.gray@gmail.com>
  L:    linux-iio@vger.kernel.org
  S:    Maintained
 +F:    Documentation/ABI/testing/sysfs-bus-counter-104-quad-8
  F:    Documentation/ABI/testing/sysfs-bus-iio-counter-104-quad-8
 -F:    drivers/iio/counter/104-quad-8.c
 +F:    drivers/counter/104-quad-8.c
  
  ACCES PCI-IDIO-16 GPIO DRIVER
  M:    William Breathitt Gray <vilhelm.gray@gmail.com>
@@@ -469,7 -468,7 +469,7 @@@ ADM1025 HARDWARE MONITOR DRIVE
  M:    Jean Delvare <jdelvare@suse.com>
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
 -F:    Documentation/hwmon/adm1025
 +F:    Documentation/hwmon/adm1025.rst
  F:    drivers/hwmon/adm1025.c
  
  ADM1029 HARDWARE MONITOR DRIVER
@@@ -521,7 -520,7 +521,7 @@@ ADS1015 HARDWARE MONITOR DRIVE
  M:    Dirk Eibach <eibach@gdsys.de>
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
 -F:    Documentation/hwmon/ads1015
 +F:    Documentation/hwmon/ads1015.rst
  F:    drivers/hwmon/ads1015.c
  F:    include/linux/platform_data/ads1015.h
  
@@@ -534,7 -533,7 +534,7 @@@ ADT7475 HARDWARE MONITOR DRIVE
  M:    Jean Delvare <jdelvare@suse.com>
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
 -F:    Documentation/hwmon/adt7475
 +F:    Documentation/hwmon/adt7475.rst
  F:    drivers/hwmon/adt7475.c
  
  ADVANSYS SCSI DRIVER
@@@ -765,7 -764,7 +765,7 @@@ AMD FAM15H PROCESSOR POWER MONITORING D
  M:    Huang Rui <ray.huang@amd.com>
  L:    linux-hwmon@vger.kernel.org
  S:    Supported
 -F:    Documentation/hwmon/fam15h_power
 +F:    Documentation/hwmon/fam15h_power.rst
  F:    drivers/hwmon/fam15h_power.c
  
  AMD FCH GPIO DRIVER
@@@ -869,7 -868,7 +869,7 @@@ L: linux-iio@vger.kernel.or
  W:    http://ez.analog.com/community/linux-device-drivers
  S:    Supported
  F:    drivers/iio/adc/ad7606.c
 -F:    Documentation/devicetree/bindings/iio/adc/ad7606.txt
 +F:    Documentation/devicetree/bindings/iio/adc/adi,ad7606.txt
  
  ANALOG DEVICES INC AD7768-1 DRIVER
  M:    Stefan Popa <stefan.popa@analog.com>
@@@ -951,7 -950,6 +951,7 @@@ F: drivers/dma/dma-axi-dmac.
  ANALOG DEVICES INC IIO DRIVERS
  M:    Lars-Peter Clausen <lars@metafoo.de>
  M:    Michael Hennerich <Michael.Hennerich@analog.com>
 +M:    Stefan Popa <stefan.popa@analog.com>
  W:    http://wiki.analog.com/
  W:    http://ez.analog.com/community/linux-device-drivers
  S:    Supported
@@@ -1169,7 -1167,7 +1169,7 @@@ S:      Supporte
  T:    git git://linux-arm.org/linux-ld.git for-upstream/mali-dp
  F:    drivers/gpu/drm/arm/display/include/
  F:    drivers/gpu/drm/arm/display/komeda/
- F:    Documentation/devicetree/bindings/display/arm/arm,komeda.txt
+ F:    Documentation/devicetree/bindings/display/arm,komeda.txt
  F:    Documentation/gpu/komeda-kms.rst
  
  ARM MALI-DP DRM DRIVER
@@@ -1182,6 -1180,15 +1182,15 @@@ F:    drivers/gpu/drm/arm
  F:    Documentation/devicetree/bindings/display/arm,malidp.txt
  F:    Documentation/gpu/afbc.rst
  
+ ARM MALI PANFROST DRM DRIVER
+ M:    Rob Herring <robh@kernel.org>
+ M:    Tomeu Vizoso <tomeu.vizoso@collabora.com>
+ L:    dri-devel@lists.freedesktop.org
+ S:    Supported
+ T:    git git://anongit.freedesktop.org/drm/drm-misc
+ F:    drivers/gpu/drm/panfrost/
+ F:    include/uapi/drm/panfrost_drm.h
  ARM MFM AND FLOPPY DRIVERS
  M:    Ian Molton <spyro@f2s.com>
  S:    Maintained
@@@ -1418,9 -1425,7 +1427,9 @@@ M:      Manivannan Sadhasivam <manivannan.sa
  L:    linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
  S:    Maintained
  F:    arch/arm64/boot/dts/bitmain/
 +F:    drivers/pinctrl/pinctrl-bm1880.c
  F:    Documentation/devicetree/bindings/arm/bitmain.yaml
 +F:    Documentation/devicetree/bindings/pinctrl/bitmain,bm1880-pinctrl.txt
  
  ARM/CALXEDA HIGHBANK ARCHITECTURE
  M:    Rob Herring <robh@kernel.org>
@@@ -2517,7 -2522,7 +2526,7 @@@ ASC7621 HARDWARE MONITOR DRIVE
  M:    George Joseph <george.joseph@fairview5.com>
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
 -F:    Documentation/hwmon/asc7621
 +F:    Documentation/hwmon/asc7621.rst
  F:    drivers/hwmon/asc7621.c
  
  ASPEED VIDEO ENGINE DRIVER
@@@ -2798,13 -2803,10 +2807,13 @@@ M:   Simon Wunderlich <sw@simonwunderlich
  M:    Antonio Quartulli <a@unstable.cc>
  L:    b.a.t.m.a.n@lists.open-mesh.org (moderated for non-subscribers)
  W:    https://www.open-mesh.org/
 +B:    https://www.open-mesh.org/projects/batman-adv/issues
 +C:    irc://chat.freenode.net/batman
  Q:    https://patchwork.open-mesh.org/project/batman/list/
 +T:    git https://git.open-mesh.org/linux-merge.git
  S:    Maintained
 -F:    Documentation/ABI/testing/sysfs-class-net-batman-adv
 -F:    Documentation/ABI/testing/sysfs-class-net-mesh
 +F:    Documentation/ABI/obsolete/sysfs-class-net-batman-adv
 +F:    Documentation/ABI/obsolete/sysfs-class-net-mesh
  F:    Documentation/networking/batman-adv.rst
  F:    include/uapi/linux/batadv_packet.h
  F:    include/uapi/linux/batman_adv.h
@@@ -3128,7 -3130,6 +3137,7 @@@ F:      drivers/cpufreq/bmips-cpufreq.
  BROADCOM BMIPS MIPS ARCHITECTURE
  M:    Kevin Cernekee <cernekee@gmail.com>
  M:    Florian Fainelli <f.fainelli@gmail.com>
 +L:    bcm-kernel-feedback-list@broadcom.com
  L:    linux-mips@vger.kernel.org
  T:    git git://github.com/broadcom/stblinux.git
  S:    Maintained
@@@ -3737,8 -3738,8 +3746,8 @@@ F:      scripts/checkpatch.p
  
  CHINESE DOCUMENTATION
  M:    Harry Wei <harryxiyou@gmail.com>
 +M:    Alex Shi <alex.shi@linux.alibaba.com>
  L:    xiyoulinuxkernelgroup@googlegroups.com (subscribers-only)
 -L:    linux-kernel@zh-kernel.org (moderated for non-subscribers)
  S:    Maintained
  F:    Documentation/translations/zh_CN/
  
@@@ -3805,7 -3806,6 +3814,7 @@@ M:      Richard Fitzgerald <rf@opensource.ci
  L:    patches@opensource.cirrus.com
  S:    Supported
  F:    drivers/clk/clk-lochnagar.c
 +F:    drivers/hwmon/lochnagar-hwmon.c
  F:    drivers/mfd/lochnagar-i2c.c
  F:    drivers/pinctrl/cirrus/pinctrl-lochnagar.c
  F:    drivers/regulator/lochnagar-regulator.c
@@@ -3814,10 -3814,8 +3823,10 @@@ F:    include/dt-bindings/pinctrl/lochnaga
  F:    include/linux/mfd/lochnagar*
  F:    Documentation/devicetree/bindings/mfd/cirrus,lochnagar.txt
  F:    Documentation/devicetree/bindings/clock/cirrus,lochnagar.txt
 +F:    Documentation/devicetree/bindings/hwmon/cirrus,lochnagar.txt
  F:    Documentation/devicetree/bindings/pinctrl/cirrus,lochnagar.txt
  F:    Documentation/devicetree/bindings/regulator/cirrus,lochnagar.txt
 +F:    Documentation/hwmon/lochnagar
  
  CISCO FCOE HBA DRIVER
  M:    Satish Kharat <satishkh@cisco.com>
@@@ -4055,7 -4053,7 +4064,7 @@@ CORETEMP HARDWARE MONITORING DRIVE
  M:    Fenghua Yu <fenghua.yu@intel.com>
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
 -F:    Documentation/hwmon/coretemp
 +F:    Documentation/hwmon/coretemp.rst
  F:    drivers/hwmon/coretemp.c
  
  COSA/SRP SYNC SERIAL DRIVER
@@@ -4064,16 -4062,6 +4073,16 @@@ W:    http://www.fi.muni.cz/~kas/cosa
  S:    Maintained
  F:    drivers/net/wan/cosa*
  
 +COUNTER SUBSYSTEM
 +M:    William Breathitt Gray <vilhelm.gray@gmail.com>
 +L:    linux-iio@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/ABI/testing/sysfs-bus-counter*
 +F:    Documentation/driver-api/generic-counter.rst
 +F:    drivers/counter/
 +F:    include/linux/counter.h
 +F:    include/linux/counter_enum.h
 +
  CPMAC ETHERNET DRIVER
  M:    Florian Fainelli <f.fainelli@gmail.com>
  L:    netdev@vger.kernel.org
@@@ -4573,7 -4561,6 +4582,7 @@@ S:      Maintaine
  F:    drivers/devfreq/
  F:    include/linux/devfreq.h
  F:    Documentation/devicetree/bindings/devfreq/
 +F:    include/trace/events/devfreq.h
  
  DEVICE FREQUENCY EVENT (DEVFREQ-EVENT)
  M:    Chanwoo Choi <cw00.choi@samsung.com>
@@@ -4621,7 -4608,7 +4630,7 @@@ DIALOG SEMICONDUCTOR DRIVER
  M:    Support Opensource <support.opensource@diasemi.com>
  W:    http://www.dialog-semiconductor.com/products
  S:    Supported
 -F:    Documentation/hwmon/da90??
 +F:    Documentation/hwmon/da90??.rst
  F:    Documentation/devicetree/bindings/mfd/da90*.txt
  F:    Documentation/devicetree/bindings/input/da90??-onkey.txt
  F:    Documentation/devicetree/bindings/thermal/da90??-thermal.txt
@@@ -4772,7 -4759,7 +4781,7 @@@ DME1737 HARDWARE MONITOR DRIVE
  M:    Juerg Haefliger <juergh@gmail.com>
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
 -F:    Documentation/hwmon/dme1737
 +F:    Documentation/hwmon/dme1737.rst
  F:    drivers/hwmon/dme1737.c
  
  DMI/SMBIOS SUPPORT
@@@ -4917,6 -4904,14 +4926,14 @@@ M:    Dave Airlie <airlied@redhat.com
  S:    Odd Fixes
  F:    drivers/gpu/drm/ast/
  
+ DRM DRIVER FOR ASPEED BMC GFX
+ M:    Joel Stanley <joel@jms.id.au>
+ L:    linux-aspeed@lists.ozlabs.org
+ T:    git git://anongit.freedesktop.org/drm/drm-misc
+ S:    Supported
+ F:    drivers/gpu/drm/aspeed/
+ F:    Documentation/devicetree/bindings/gpu/aspeed-gfx.txt
  DRM DRIVER FOR BOCHS VIRTUAL GPU
  M:    Gerd Hoffmann <kraxel@redhat.com>
  L:    virtualization@lists.linux-foundation.org
@@@ -4930,6 -4925,12 +4947,12 @@@ T:    git git://anongit.freedesktop.org/dr
  S:    Maintained
  F:    drivers/gpu/drm/tve200/
  
+ DRM DRIVER FOR FEIYANG FY07024DI26A30-D MIPI-DSI LCD PANELS
+ M:    Jagan Teki <jagan@amarulasolutions.com>
+ S:    Maintained
+ F:    drivers/gpu/drm/panel/panel-feiyang-fy07024di26a30d.c
+ F:    Documentation/devicetree/bindings/display/panel/feiyang,fy07024di26a30d.txt
  DRM DRIVER FOR ILITEK ILI9225 PANELS
  M:    David Lechner <david@lechnology.com>
  S:    Maintained
@@@ -5021,6 -5022,12 +5044,12 @@@ S:    Orphan / Obsolet
  F:    drivers/gpu/drm/r128/
  F:    include/uapi/drm/r128_drm.h
  
+ DRM DRIVER FOR ROCKTECH JH057N00900 PANELS
+ M:    Guido Günther <agx@sigxcpu.org>
+ S:    Maintained
+ F:    drivers/gpu/drm/panel/panel-rocktech-jh057n00900.c
+ F:    Documentation/devicetree/bindings/display/panel/rocktech,jh057n00900.txt
  DRM DRIVER FOR SAVAGE VIDEO CARDS
  S:    Orphan / Obsolete
  F:    drivers/gpu/drm/savage/
@@@ -5068,6 -5075,13 +5097,13 @@@ S:    Odd Fixe
  F:    drivers/gpu/drm/udl/
  T:    git git://anongit.freedesktop.org/drm/drm-misc
  
+ DRM DRIVER FOR VIRTUALBOX VIRTUAL GPU
+ M:    Hans de Goede <hdegoede@redhat.com>
+ L:    dri-devel@lists.freedesktop.org
+ S:    Maintained
+ F:    drivers/gpu/drm/vboxvideo/
+ T:    git git://anongit.freedesktop.org/drm/drm-misc
  DRM DRIVER FOR VIRTUAL KERNEL MODESETTING (VKMS)
  M:    Rodrigo Siqueira <rodrigosiqueiramelo@gmail.com>
  R:    Haneen Mohammed <hamohammed.sa@gmail.com>
@@@ -5202,6 -5216,15 +5238,15 @@@ S:    Maintaine
  F:    drivers/gpu/drm/hisilicon/
  F:    Documentation/devicetree/bindings/display/hisilicon/
  
+ DRM DRIVERS FOR LIMA
+ M:    Qiang Yu <yuq825@gmail.com>
+ L:    dri-devel@lists.freedesktop.org
+ L:    lima@lists.freedesktop.org (moderated for non-subscribers)
+ S:    Maintained
+ F:    drivers/gpu/drm/lima/
+ F:    include/uapi/drm/lima_drm.h
+ T:    git git://anongit.freedesktop.org/drm/drm-misc
  DRM DRIVERS FOR MEDIATEK
  M:    CK Hu <ck.hu@mediatek.com>
  M:    Philipp Zabel <p.zabel@pengutronix.de>
@@@ -5619,12 -5642,6 +5664,12 @@@ L:    linux-edac@vger.kernel.or
  S:    Maintained
  F:    drivers/edac/ghes_edac.c
  
 +EDAC-I10NM
 +M:    Tony Luck <tony.luck@intel.com>
 +L:    linux-edac@vger.kernel.org
 +S:    Maintained
 +F:    drivers/edac/i10nm_base.c
 +
  EDAC-I3000
  L:    linux-edac@vger.kernel.org
  S:    Orphan
@@@ -5706,7 -5723,7 +5751,7 @@@ EDAC-SKYLAK
  M:    Tony Luck <tony.luck@intel.com>
  L:    linux-edac@vger.kernel.org
  S:    Maintained
 -F:    drivers/edac/skx_edac.c
 +F:    drivers/edac/skx_*.c
  
  EDAC-TI
  M:    Tero Kristo <t-kristo@ti.com>
@@@ -5964,7 -5981,7 +6009,7 @@@ F71805F HARDWARE MONITORING DRIVE
  M:    Jean Delvare <jdelvare@suse.com>
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
 -F:    Documentation/hwmon/f71805f
 +F:    Documentation/hwmon/f71805f.rst
  F:    drivers/hwmon/f71805f.c
  
  FADDR2LINE
@@@ -6489,7 -6506,7 +6534,7 @@@ S:      Maintaine
  F:    drivers/media/radio/radio-gemtek*
  
  GENERIC GPIO I2C DRIVER
 -M:    Haavard Skinnemoen <hskinnemoen@gmail.com>
 +M:    Wolfram Sang <wsa+renesas@sang-engineering.com>
  S:    Supported
  F:    drivers/i2c/busses/i2c-gpio.c
  F:    include/linux/platform_data/i2c-gpio.h
@@@ -6621,7 -6638,7 +6666,7 @@@ M:      Andy Shevchenko <andriy.shevchenko@l
  L:    linux-gpio@vger.kernel.org
  L:    linux-acpi@vger.kernel.org
  S:    Maintained
 -F:    Documentation/acpi/gpio-properties.txt
 +F:    Documentation/firmware-guide/acpi/gpio-properties.rst
  F:    drivers/gpio/gpiolib-acpi.c
  
  GPIO IR Transmitter
@@@ -7361,6 -7378,7 +7406,6 @@@ F:      Documentation/devicetree/bindings/i3
  F:    Documentation/driver-api/i3c
  F:    drivers/i3c/
  F:    include/linux/i3c/
 -F:    include/dt-bindings/i3c/
  
  I3C DRIVER FOR SYNOPSYS DESIGNWARE
  M:    Vitor Soares <vitor.soares@synopsys.com>
@@@ -7645,7 -7663,7 +7690,7 @@@ INA209 HARDWARE MONITOR DRIVE
  M:    Guenter Roeck <linux@roeck-us.net>
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
 -F:    Documentation/hwmon/ina209
 +F:    Documentation/hwmon/ina209.rst
  F:    Documentation/devicetree/bindings/hwmon/ina2xx.txt
  F:    drivers/hwmon/ina209.c
  
@@@ -7653,7 -7671,7 +7698,7 @@@ INA2XX HARDWARE MONITOR DRIVE
  M:    Guenter Roeck <linux@roeck-us.net>
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
 -F:    Documentation/hwmon/ina2xx
 +F:    Documentation/hwmon/ina2xx.rst
  F:    drivers/hwmon/ina2xx.c
  F:    include/linux/platform_data/ina2xx.h
  
@@@ -8073,7 -8091,6 +8118,7 @@@ F:      drivers/gpio/gpio-intel-mid.
  
  INTERCONNECT API
  M:    Georgi Djakov <georgi.djakov@linaro.org>
 +L:    linux-pm@vger.kernel.org
  S:    Maintained
  F:    Documentation/interconnect/
  F:    Documentation/devicetree/bindings/interconnect/
@@@ -8282,7 -8299,7 +8327,7 @@@ IT87 HARDWARE MONITORING DRIVE
  M:    Jean Delvare <jdelvare@suse.com>
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
 -F:    Documentation/hwmon/it87
 +F:    Documentation/hwmon/it87.rst
  F:    drivers/hwmon/it87.c
  
  IT913X MEDIA DRIVER
@@@ -8326,7 -8343,7 +8371,7 @@@ M:      Guenter Roeck <linux@roeck-us.net
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
  F:    drivers/hwmon/jc42.c
 -F:    Documentation/hwmon/jc42
 +F:    Documentation/hwmon/jc42.rst
  
  JFS FILESYSTEM
  M:    Dave Kleikamp <shaggy@kernel.org>
@@@ -8374,14 -8391,14 +8419,14 @@@ K10TEMP HARDWARE MONITORING DRIVE
  M:    Clemens Ladisch <clemens@ladisch.de>
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
 -F:    Documentation/hwmon/k10temp
 +F:    Documentation/hwmon/k10temp.rst
  F:    drivers/hwmon/k10temp.c
  
  K8TEMP HARDWARE MONITORING DRIVER
  M:    Rudolf Marek <r.marek@assembler.cz>
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
 -F:    Documentation/hwmon/k8temp
 +F:    Documentation/hwmon/k8temp.rst
  F:    drivers/hwmon/k8temp.c
  
  KASAN
@@@ -8736,7 -8753,6 +8781,7 @@@ F:      scripts/leaking_addresses.p
  LED SUBSYSTEM
  M:    Jacek Anaszewski <jacek.anaszewski@gmail.com>
  M:    Pavel Machek <pavel@ucw.cz>
 +R:    Dan Murphy <dmurphy@ti.com>
  L:    linux-leds@vger.kernel.org
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/j.anaszewski/linux-leds.git
  S:    Maintained
@@@ -9022,7 -9038,7 +9067,7 @@@ R:      Daniel Lustig <dlustig@nvidia.com
  L:    linux-kernel@vger.kernel.org
  L:    linux-arch@vger.kernel.org
  S:    Supported
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
  F:    tools/memory-model/
  F:    Documentation/atomic_bitops.txt
  F:    Documentation/atomic_t.txt
@@@ -9073,21 -9089,21 +9118,21 @@@ LM78 HARDWARE MONITOR DRIVE
  M:    Jean Delvare <jdelvare@suse.com>
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
 -F:    Documentation/hwmon/lm78
 +F:    Documentation/hwmon/lm78.rst
  F:    drivers/hwmon/lm78.c
  
  LM83 HARDWARE MONITOR DRIVER
  M:    Jean Delvare <jdelvare@suse.com>
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
 -F:    Documentation/hwmon/lm83
 +F:    Documentation/hwmon/lm83.rst
  F:    drivers/hwmon/lm83.c
  
  LM90 HARDWARE MONITOR DRIVER
  M:    Jean Delvare <jdelvare@suse.com>
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
 -F:    Documentation/hwmon/lm90
 +F:    Documentation/hwmon/lm90.rst
  F:    Documentation/devicetree/bindings/hwmon/lm90.txt
  F:    drivers/hwmon/lm90.c
  F:    include/dt-bindings/thermal/lm90.h
@@@ -9096,7 -9112,7 +9141,7 @@@ LM95234 HARDWARE MONITOR DRIVE
  M:    Guenter Roeck <linux@roeck-us.net>
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
 -F:    Documentation/hwmon/lm95234
 +F:    Documentation/hwmon/lm95234.rst
  F:    drivers/hwmon/lm95234.c
  
  LME2510 MEDIA DRIVER
@@@ -9128,6 -9144,7 +9173,6 @@@ F:      arch/*/include/asm/spinlock*.
  F:    include/linux/rwlock*.h
  F:    include/linux/mutex*.h
  F:    include/linux/rwsem*.h
 -F:    arch/*/include/asm/rwsem.h
  F:    include/linux/seqlock.h
  F:    lib/locking*.[ch]
  F:    kernel/locking/
@@@ -9169,7 -9186,7 +9214,7 @@@ LTC4261 HARDWARE MONITOR DRIVE
  M:    Guenter Roeck <linux@roeck-us.net>
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
 -F:    Documentation/hwmon/ltc4261
 +F:    Documentation/hwmon/ltc4261.rst
  F:    drivers/hwmon/ltc4261.c
  
  LTC4306 I2C MULTIPLEXER DRIVER
@@@ -9400,7 -9417,7 +9445,7 @@@ MAX16065 HARDWARE MONITOR DRIVE
  M:    Guenter Roeck <linux@roeck-us.net>
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
 -F:    Documentation/hwmon/max16065
 +F:    Documentation/hwmon/max16065.rst
  F:    drivers/hwmon/max16065.c
  
  MAX2175 SDR TUNER DRIVER
@@@ -9416,14 -9433,14 +9461,14 @@@ F:   include/uapi/linux/max2175.
  MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER
  L:    linux-hwmon@vger.kernel.org
  S:    Orphan
 -F:    Documentation/hwmon/max6650
 +F:    Documentation/hwmon/max6650.rst
  F:    drivers/hwmon/max6650.c
  
  MAX6697 HARDWARE MONITOR DRIVER
  M:    Guenter Roeck <linux@roeck-us.net>
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
 -F:    Documentation/hwmon/max6697
 +F:    Documentation/hwmon/max6697.rst
  F:    Documentation/devicetree/bindings/hwmon/max6697.txt
  F:    drivers/hwmon/max6697.c
  F:    include/linux/platform_data/max6697.h
@@@ -9435,13 -9452,6 +9480,13 @@@ S:    Maintaine
  F:    Documentation/devicetree/bindings/sound/max9860.txt
  F:    sound/soc/codecs/max9860.*
  
 +MAXBOTIX ULTRASONIC RANGER IIO DRIVER
 +M:    Andreas Klinger <ak@it-klinger.de>
 +L:    linux-iio@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/iio/proximity/maxbotix,mb1232.txt
 +F:    drivers/iio/proximity/mb1232.c
 +
  MAXIM MAX77802 PMIC REGULATOR DEVICE DRIVER
  M:    Javier Martinez Canillas <javier@dowhile0.org>
  L:    linux-kernel@vger.kernel.org
@@@ -9810,17 -9820,9 +9855,17 @@@ F:    drivers/media/platform/mtk-vpu
  F:    Documentation/devicetree/bindings/media/mediatek-vcodec.txt
  F:    Documentation/devicetree/bindings/media/mediatek-vpu.txt
  
 +MEDIATEK MMC/SD/SDIO DRIVER
 +M:    Chaotian Jing <chaotian.jing@mediatek.com>
 +S:    Maintained
 +F:    drivers/mmc/host/mtk-sd.c
 +F:    Documentation/devicetree/bindings/mmc/mtk-sd.txt
 +
  MEDIATEK MT76 WIRELESS LAN DRIVER
  M:    Felix Fietkau <nbd@nbd.name>
  M:    Lorenzo Bianconi <lorenzo.bianconi83@gmail.com>
 +R:    Ryder Lee <ryder.lee@mediatek.com>
 +R:    Roy Luo <royluo@google.com>
  L:    linux-wireless@vger.kernel.org
  S:    Maintained
  F:    drivers/net/wireless/mediatek/mt76/
@@@ -9919,6 -9921,15 +9964,6 @@@ F:     drivers/net/ethernet/mellanox/mlx5/c
  F:    drivers/net/ethernet/mellanox/mlx5/core/fpga/*
  F:    include/linux/mlx5/mlx5_ifc_fpga.h
  
 -MELLANOX ETHERNET INNOVA IPSEC DRIVER
 -R:    Boris Pismenny <borisp@mellanox.com>
 -L:    netdev@vger.kernel.org
 -S:    Supported
 -W:    http://www.mellanox.com
 -Q:    http://patchwork.ozlabs.org/project/netdev/list/
 -F:    drivers/net/ethernet/mellanox/mlx5/core/en_ipsec/*
 -F:    drivers/net/ethernet/mellanox/mlx5/core/ipsec*
 -
  MELLANOX ETHERNET SWITCH DRIVERS
  M:    Jiri Pirko <jiri@mellanox.com>
  M:    Ido Schimmel <idosch@mellanox.com>
@@@ -10075,7 -10086,7 +10120,7 @@@ F:   drivers/mfd/menf21bmc.
  F:    drivers/watchdog/menf21bmc_wdt.c
  F:    drivers/leds/leds-menf21bmc.c
  F:    drivers/hwmon/menf21bmc_hwmon.c
 -F:    Documentation/hwmon/menf21bmc
 +F:    Documentation/hwmon/menf21bmc.rst
  
  MEN Z069 WATCHDOG DRIVER
  M:    Johannes Thumshirn <jth@kernel.org>
@@@ -10090,7 -10101,6 +10135,7 @@@ L:   linux-amlogic@lists.infradead.or
  W:    http://linux-meson.com/
  S:    Supported
  F:    drivers/media/platform/meson/ao-cec.c
 +F:    drivers/media/platform/meson/ao-cec-g12a.c
  F:    Documentation/devicetree/bindings/media/meson-ao-cec.txt
  T:    git git://linuxtv.org/media_tree.git
  
@@@ -10180,7 -10190,7 +10225,7 @@@ F:   drivers/spi/spi-at91-usart.
  F:    Documentation/devicetree/bindings/mfd/atmel-usart.txt
  
  MICROCHIP KSZ SERIES ETHERNET SWITCH DRIVER
 -M:    Woojung Huh <Woojung.Huh@microchip.com>
 +M:    Woojung Huh <woojung.huh@microchip.com>
  M:    Microchip Linux Driver Support <UNGLinuxDriver@microchip.com>
  L:    netdev@vger.kernel.org
  S:    Maintained
@@@ -10469,7 -10479,7 +10514,7 @@@ F:   include/uapi/linux/meye.
  MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD
  M:    Jiri Slaby <jirislaby@gmail.com>
  S:    Maintained
 -F:    Documentation/serial/moxa-smartio
 +F:    Documentation/serial/moxa-smartio.rst
  F:    drivers/tty/mxser.*
  
  MR800 AVERMEDIA USB FM RADIO DRIVER
@@@ -10704,7 -10714,7 +10749,7 @@@ NCT6775 HARDWARE MONITOR DRIVE
  M:    Guenter Roeck <linux@roeck-us.net>
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
 -F:    Documentation/hwmon/nct6775
 +F:    Documentation/hwmon/nct6775.rst
  F:    drivers/hwmon/nct6775.c
  
  NET_FAILOVER MODULE
@@@ -10782,7 -10792,6 +10827,7 @@@ L:   linux-block@vger.kernel.or
  L:    nbd@other.debian.org
  F:    Documentation/blockdev/nbd.txt
  F:    drivers/block/nbd.c
 +F:    include/trace/events/nbd.h
  F:    include/uapi/linux/nbd.h
  
  NETWORK DROP MONITOR
@@@ -11153,16 -11162,6 +11198,16 @@@ F: Documentation/ABI/stable/sysfs-bus-n
  F:    include/linux/nvmem-consumer.h
  F:    include/linux/nvmem-provider.h
  
 +NXP FXAS21002C DRIVER
 +M:    Rui Miguel Silva <rmfrfs@gmail.com>
 +L:    linux-iio@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/iio/gyroscope/fxas21002c.txt
 +F:    drivers/iio/gyro/fxas21002c_core.c
 +F:    drivers/iio/gyro/fxas21002c.h
 +F:    drivers/iio/gyro/fxas21002c_i2c.c
 +F:    drivers/iio/gyro/fxas21002c_spi.c
 +
  NXP SGTL5000 DRIVER
  M:    Fabio Estevam <festevam@gmail.com>
  L:    alsa-devel@alsa-project.org (moderated for non-subscribers)
@@@ -11170,12 -11169,6 +11215,12 @@@ S: Maintaine
  F:    Documentation/devicetree/bindings/sound/sgtl5000.txt
  F:    sound/soc/codecs/sgtl5000*
  
 +NXP SJA1105 ETHERNET SWITCH DRIVER
 +M:    Vladimir Oltean <olteanv@gmail.com>
 +L:    linux-kernel@vger.kernel.org
 +S:    Maintained
 +F:    drivers/net/dsa/sja1105
 +
  NXP TDA998X DRM DRIVER
  M:    Russell King <linux@armlinux.org.uk>
  S:    Maintained
@@@ -11729,14 -11722,6 +11774,14 @@@ L: linux-i2c@vger.kernel.or
  S:    Orphan
  F:    drivers/i2c/busses/i2c-pasemi.c
  
 +PACKING
 +M:    Vladimir Oltean <olteanv@gmail.com>
 +L:    netdev@vger.kernel.org
 +S:    Supported
 +F:    lib/packing.c
 +F:    include/linux/packing.h
 +F:    Documentation/packing.txt
 +
  PADATA PARALLEL EXECUTION MECHANISM
  M:    Steffen Klassert <steffen.klassert@secunet.com>
  L:    linux-crypto@vger.kernel.org
@@@ -11823,7 -11808,7 +11868,7 @@@ PC87360 HARDWARE MONITORING DRIVE
  M:    Jim Cromie <jim.cromie@gmail.com>
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
 -F:    Documentation/hwmon/pc87360
 +F:    Documentation/hwmon/pc87360.rst
  F:    drivers/hwmon/pc87360.c
  
  PC8736x GPIO DRIVER
@@@ -11835,7 -11820,7 +11880,7 @@@ PC87427 HARDWARE MONITORING DRIVE
  M:    Jean Delvare <jdelvare@suse.com>
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
 -F:    Documentation/hwmon/pc87427
 +F:    Documentation/hwmon/pc87427.rst
  F:    drivers/hwmon/pc87427.c
  
  PCA9532 LED DRIVER
@@@ -12235,7 -12220,6 +12280,7 @@@ F:   arch/*/kernel/*/*/perf_event*.
  F:    arch/*/include/asm/perf_event.h
  F:    arch/*/kernel/perf_callchain.c
  F:    arch/*/events/*
 +F:    arch/*/events/*/*
  F:    tools/perf/
  
  PERSONALITY HANDLING
@@@ -12404,23 -12388,23 +12449,23 @@@ S:        Maintaine
  F:    Documentation/devicetree/bindings/hwmon/ibm,cffps1.txt
  F:    Documentation/devicetree/bindings/hwmon/max31785.txt
  F:    Documentation/devicetree/bindings/hwmon/ltc2978.txt
 -F:    Documentation/hwmon/adm1275
 -F:    Documentation/hwmon/ibm-cffps
 -F:    Documentation/hwmon/ir35221
 -F:    Documentation/hwmon/lm25066
 -F:    Documentation/hwmon/ltc2978
 -F:    Documentation/hwmon/ltc3815
 -F:    Documentation/hwmon/max16064
 -F:    Documentation/hwmon/max20751
 -F:    Documentation/hwmon/max31785
 -F:    Documentation/hwmon/max34440
 -F:    Documentation/hwmon/max8688
 -F:    Documentation/hwmon/pmbus
 -F:    Documentation/hwmon/pmbus-core
 -F:    Documentation/hwmon/tps40422
 -F:    Documentation/hwmon/ucd9000
 -F:    Documentation/hwmon/ucd9200
 -F:    Documentation/hwmon/zl6100
 +F:    Documentation/hwmon/adm1275.rst
 +F:    Documentation/hwmon/ibm-cffps.rst
 +F:    Documentation/hwmon/ir35221.rst
 +F:    Documentation/hwmon/lm25066.rst
 +F:    Documentation/hwmon/ltc2978.rst
 +F:    Documentation/hwmon/ltc3815.rst
 +F:    Documentation/hwmon/max16064.rst
 +F:    Documentation/hwmon/max20751.rst
 +F:    Documentation/hwmon/max31785.rst
 +F:    Documentation/hwmon/max34440.rst
 +F:    Documentation/hwmon/max8688.rst
 +F:    Documentation/hwmon/pmbus.rst
 +F:    Documentation/hwmon/pmbus-core.rst
 +F:    Documentation/hwmon/tps40422.rst
 +F:    Documentation/hwmon/ucd9000.rst
 +F:    Documentation/hwmon/ucd9200.rst
 +F:    Documentation/hwmon/zl6100.rst
  F:    drivers/hwmon/pmbus/
  F:    include/linux/pmbus.h
  
@@@ -12476,7 -12460,7 +12521,7 @@@ M:   Mark Rutland <mark.rutland@arm.com
  M:    Lorenzo Pieralisi <lorenzo.pieralisi@arm.com>
  L:    linux-arm-kernel@lists.infradead.org
  S:    Maintained
 -F:    drivers/firmware/psci*.c
 +F:    drivers/firmware/psci/
  F:    include/linux/psci.h
  F:    include/uapi/linux/psci.h
  
@@@ -12684,7 -12668,7 +12729,7 @@@ M:   Bartlomiej Zolnierkiewicz <b.zolnier
  L:    linux-hwmon@vger.kernel.org
  S:    Supported
  F:    Documentation/devicetree/bindings/hwmon/pwm-fan.txt
 -F:    Documentation/hwmon/pwm-fan
 +F:    Documentation/hwmon/pwm-fan.rst
  F:    drivers/hwmon/pwm-fan.c
  
  PWM IR Transmitter
@@@ -13102,9 -13086,9 +13147,9 @@@ M:   Josh Triplett <josh@joshtriplett.org
  R:    Steven Rostedt <rostedt@goodmis.org>
  R:    Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  R:    Lai Jiangshan <jiangshanlai@gmail.com>
 -L:    linux-kernel@vger.kernel.org
 +L:    rcu@vger.kernel.org
  S:    Supported
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
  F:    tools/testing/selftests/rcutorture
  
  RDC R-321X SoC
@@@ -13150,10 -13134,10 +13195,10 @@@ R:        Steven Rostedt <rostedt@goodmis.org
  R:    Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
  R:    Lai Jiangshan <jiangshanlai@gmail.com>
  R:    Joel Fernandes <joel@joelfernandes.org>
 -L:    linux-kernel@vger.kernel.org
 +L:    rcu@vger.kernel.org
  W:    http://www.rdrop.com/users/paulmck/RCU/
  S:    Supported
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
  F:    Documentation/RCU/
  X:    Documentation/RCU/torture.txt
  F:    include/linux/rcu*
@@@ -13379,7 -13363,7 +13424,7 @@@ ROCKETPORT DRIVE
  P:    Comtrol Corp.
  W:    http://www.comtrol.com
  S:    Maintained
 -F:    Documentation/serial/rocket.txt
 +F:    Documentation/serial/rocket.rst
  F:    drivers/tty/rocket*
  
  ROCKETPORT EXPRESS/INFINITY DRIVER
@@@ -13463,12 -13447,6 +13508,12 @@@ T: git git://git.kernel.org/pub/scm/lin
  S:    Maintained
  F:    drivers/net/wireless/realtek/rtlwifi/
  
 +REALTEK WIRELESS DRIVER (rtw88)
 +M:    Yan-Hsuan Chuang <yhchuang@realtek.com>
 +L:    linux-wireless@vger.kernel.org
 +S:    Maintained
 +F:    drivers/net/wireless/realtek/rtw88/
 +
  RTL8XXXU WIRELESS DRIVER (rtl8xxxu)
  M:    Jes Sorensen <Jes.Sorensen@gmail.com>
  L:    linux-wireless@vger.kernel.org
@@@ -14014,7 -13992,7 +14059,7 @@@ W:   https://selinuxproject.or
  W:    https://github.com/SELinuxProject
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/pcmoore/selinux.git
  S:    Supported
 -F:    include/linux/selinux*
 +F:    include/uapi/linux/selinux_netlink.h
  F:    security/selinux/
  F:    scripts/selinux/
  F:    Documentation/admin-guide/LSM/SELinux.rst
@@@ -14311,10 -14289,10 +14356,10 @@@ M:        "Paul E. McKenney" <paulmck@linux.ib
  M:    Josh Triplett <josh@joshtriplett.org>
  R:    Steven Rostedt <rostedt@goodmis.org>
  R:    Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 -L:    linux-kernel@vger.kernel.org
 +L:    rcu@vger.kernel.org
  W:    http://www.rdrop.com/users/paulmck/RCU/
  S:    Supported
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
  F:    include/linux/srcu*.h
  F:    kernel/rcu/srcu*.c
  
@@@ -14355,21 -14333,21 +14400,21 @@@ SMM665 HARDWARE MONITOR DRIVE
  M:    Guenter Roeck <linux@roeck-us.net>
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
 -F:    Documentation/hwmon/smm665
 +F:    Documentation/hwmon/smm665.rst
  F:    drivers/hwmon/smm665.c
  
  SMSC EMC2103 HARDWARE MONITOR DRIVER
  M:    Steve Glendinning <steve.glendinning@shawell.net>
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
 -F:    Documentation/hwmon/emc2103
 +F:    Documentation/hwmon/emc2103.rst
  F:    drivers/hwmon/emc2103.c
  
  SMSC SCH5627 HARDWARE MONITOR DRIVER
  M:    Hans de Goede <hdegoede@redhat.com>
  L:    linux-hwmon@vger.kernel.org
  S:    Supported
 -F:    Documentation/hwmon/sch5627
 +F:    Documentation/hwmon/sch5627.rst
  F:    drivers/hwmon/sch5627.c
  
  SMSC UFX6000 and UFX7000 USB to VGA DRIVER
@@@ -14382,7 -14360,7 +14427,7 @@@ SMSC47B397 HARDWARE MONITOR DRIVE
  M:    Jean Delvare <jdelvare@suse.com>
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
 -F:    Documentation/hwmon/smsc47b397
 +F:    Documentation/hwmon/smsc47b397.rst
  F:    drivers/hwmon/smsc47b397.c
  
  SMSC911x ETHERNET DRIVER
@@@ -14402,8 -14380,9 +14447,8 @@@ SOC-CAMERA V4L2 SUBSYSTE
  L:    linux-media@vger.kernel.org
  T:    git git://linuxtv.org/media_tree.git
  S:    Orphan
 -F:    include/media/soc*
 -F:    drivers/media/i2c/soc_camera/
 -F:    drivers/media/platform/soc_camera/
 +F:    include/media/soc_camera.h
 +F:    drivers/staging/media/soc_camera/
  
  SOCIONEXT SYNQUACER I2C DRIVER
  M:    Ard Biesheuvel <ard.biesheuvel@linaro.org>
@@@ -14539,15 -14518,16 +14584,15 @@@ T:        git git://linuxtv.org/media_tree.gi
  S:    Maintained
  F:    drivers/media/i2c/imx355.c
  
 -SONY MEMORYSTICK CARD SUPPORT
 -M:    Alex Dubov <oakad@yahoo.com>
 -W:    http://tifmxx.berlios.de/
 -S:    Maintained
 -F:    drivers/memstick/host/tifm_ms.c
 -
 -SONY MEMORYSTICK STANDARD SUPPORT
 +SONY MEMORYSTICK SUBSYSTEM
  M:    Maxim Levitsky <maximlevitsky@gmail.com>
 +M:    Alex Dubov <oakad@yahoo.com>
 +M:    Ulf Hansson <ulf.hansson@linaro.org>
 +L:    linux-mmc@vger.kernel.org
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/ulfh/mmc.git
  S:    Maintained
 -F:    drivers/memstick/core/ms_block.*
 +F:    drivers/memstick/
 +F:    include/linux/memstick.h
  
  SONY VAIO CONTROL DEVICE DRIVER
  M:    Mattia Dongili <malattia@linux.it>
@@@ -14743,14 -14723,6 +14788,14 @@@ S: Maintaine
  F:    drivers/iio/imu/st_lsm6dsx/
  F:    Documentation/devicetree/bindings/iio/imu/st_lsm6dsx.txt
  
 +ST MIPID02 CSI-2 TO PARALLEL BRIDGE DRIVER
 +M:    Mickael Guene <mickael.guene@st.com>
 +L:    linux-media@vger.kernel.org
 +T:    git git://linuxtv.org/media_tree.git
 +S:    Maintained
 +F:    drivers/media/i2c/st-mipid02.c
 +F:    Documentation/devicetree/bindings/media/i2c/st,st-mipid02.txt
 +
  ST STM32 I2C/SMBUS DRIVER
  M:    Pierre-Yves MORDRET <pierre-yves.mordret@st.com>
  L:    linux-i2c@vger.kernel.org
@@@ -15387,11 -15359,6 +15432,11 @@@ M: Laxman Dewangan <ldewangan@nvidia.co
  S:    Supported
  F:    drivers/spi/spi-tegra*
  
 +TEGRA XUSB PADCTL DRIVER
 +M:    JC Kuo <jckuo@nvidia.com>
 +S:    Supported
 +F:    drivers/phy/tegra/xusb*
 +
  TEHUTI ETHERNET DRIVER
  M:    Andy Gospodarek <andy@greyhouse.net>
  L:    netdev@vger.kernel.org
@@@ -15585,11 -15552,9 +15630,11 @@@ S: Maintaine
  F:    drivers/net/ethernet/ti/cpsw*
  F:    drivers/net/ethernet/ti/davinci*
  
 -TI FLASH MEDIA INTERFACE DRIVER
 +TI FLASH MEDIA MEMORYSTICK/MMC DRIVERS
  M:    Alex Dubov <oakad@yahoo.com>
  S:    Maintained
 +W:    http://tifmxx.berlios.de/
 +F:    drivers/memstick/host/tifm_ms.c
  F:    drivers/misc/tifm*
  F:    drivers/mmc/host/tifm_sd.c
  F:    include/linux/tifm.h
@@@ -15741,7 -15706,7 +15786,7 @@@ TMP401 HARDWARE MONITOR DRIVE
  M:    Guenter Roeck <linux@roeck-us.net>
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
 -F:    Documentation/hwmon/tmp401
 +F:    Documentation/hwmon/tmp401.rst
  F:    drivers/hwmon/tmp401.c
  
  TMPFS (SHMEM FILESYSTEM)
@@@ -15774,7 -15739,7 +15819,7 @@@ M:   "Paul E. McKenney" <paulmck@linux.ib
  M:    Josh Triplett <josh@joshtriplett.org>
  L:    linux-kernel@vger.kernel.org
  S:    Supported
 -T:    git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git
 +T:    git git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-rcu.git dev
  F:    Documentation/RCU/torture.txt
  F:    kernel/torture.c
  F:    kernel/rcu/rcutorture.c
@@@ -16016,12 -15981,6 +16061,12 @@@ F: drivers/uwb
  F:    include/linux/uwb.h
  F:    include/linux/uwb/
  
 +UNICODE SUBSYSTEM:
 +M:    Gabriel Krisman Bertazi <krisman@collabora.com>
 +L:    linux-fsdevel@vger.kernel.org
 +S:    Supported
 +F:    fs/unicode/
 +
  UNICORE32 ARCHITECTURE:
  M:    Guan Xuetao <gxt@pku.edu.cn>
  W:    http://mprc.pku.edu.cn/~guanxuetao/linux
@@@ -16067,13 -16026,6 +16112,13 @@@ L: linux-scsi@vger.kernel.or
  S:    Supported
  F:    drivers/scsi/ufs/*dwc*
  
 +UNIVERSAL FLASH STORAGE HOST CONTROLLER DRIVER MEDIATEK HOOKS
 +M:    Stanley Chu <stanley.chu@mediatek.com>
 +L:    linux-scsi@vger.kernel.org
 +L:    linux-mediatek@lists.infradead.org (moderated for non-subscribers)
 +S:    Maintained
 +F:    drivers/scsi/ufs/ufs-mediatek*
 +
  UNSORTED BLOCK IMAGES (UBI)
  M:    Artem Bityutskiy <dedekind1@gmail.com>
  M:    Richard Weinberger <richard@nod.at>
@@@ -16177,14 -16129,6 +16222,14 @@@ L: linux-usb@vger.kernel.or
  S:    Maintained
  F:    drivers/usb/roles/intel-xhci-usb-role-switch.c
  
 +USB IP DRIVER FOR HISILICON KIRIN
 +M:    Yu Chen <chenyu56@huawei.com>
 +M:    Binghui Wang <wangbinghui@hisilicon.com>
 +L:    linux-usb@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/devicetree/bindings/phy/phy-hi3660-usb3.txt
 +F:    drivers/phy/hisilicon/phy-hi3660-usb3.c
 +
  USB ISP116X DRIVER
  M:    Olav Kongas <ok@artecdesign.ee>
  L:    linux-usb@vger.kernel.org
@@@ -16800,7 -16744,7 +16845,7 @@@ VT1211 HARDWARE MONITOR DRIVE
  M:    Juerg Haefliger <juergh@gmail.com>
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
 -F:    Documentation/hwmon/vt1211
 +F:    Documentation/hwmon/vt1211.rst
  F:    drivers/hwmon/vt1211.c
  
  VT8231 HARDWARE MONITOR DRIVER
@@@ -16828,14 -16772,14 +16873,14 @@@ W83791D HARDWARE MONITORING DRIVE
  M:    Marc Hulsman <m.hulsman@tudelft.nl>
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
 -F:    Documentation/hwmon/w83791d
 +F:    Documentation/hwmon/w83791d.rst
  F:    drivers/hwmon/w83791d.c
  
  W83793 HARDWARE MONITORING DRIVER
  M:    Rudolf Marek <r.marek@assembler.cz>
  L:    linux-hwmon@vger.kernel.org
  S:    Maintained
 -F:    Documentation/hwmon/w83793
 +F:    Documentation/hwmon/w83793.rst
  F:    drivers/hwmon/w83793.c
  
  W83795 HARDWARE MONITORING DRIVER
@@@ -16944,7 -16888,7 +16989,7 @@@ L:   patches@opensource.cirrus.co
  T:    git https://github.com/CirrusLogic/linux-drivers.git
  W:    https://github.com/CirrusLogic/linux-drivers/wiki
  S:    Supported
 -F:    Documentation/hwmon/wm83??
 +F:    Documentation/hwmon/wm83??.rst
  F:    Documentation/devicetree/bindings/extcon/extcon-arizona.txt
  F:    Documentation/devicetree/bindings/regulator/arizona-regulator.txt
  F:    Documentation/devicetree/bindings/mfd/arizona.txt
@@@ -17034,7 -16978,7 +17079,7 @@@ M:   Tony Luck <tony.luck@intel.com
  M:    Borislav Petkov <bp@alien8.de>
  L:    linux-edac@vger.kernel.org
  S:    Maintained
 -F:    arch/x86/kernel/cpu/mcheck/*
 +F:    arch/x86/kernel/cpu/mce/*
  
  X86 MICROCODE UPDATE SUPPORT
  M:    Borislav Petkov <bp@alien8.de>
index 79fb302fb9543f93cfb9738700f53e34006e869c,9ec6356d3f0b7b02d7bd20f6b5bf085a4478cabc..cc8ad3831982d5e2e4dfa60ec76581fb9a3d777c
@@@ -60,6 -60,7 +60,7 @@@
  #include "amdgpu_pm.h"
  
  #include "amdgpu_xgmi.h"
+ #include "amdgpu_ras.h"
  
  MODULE_FIRMWARE("amdgpu/vega10_gpu_info.bin");
  MODULE_FIRMWARE("amdgpu/vega12_gpu_info.bin");
@@@ -1506,7 -1507,9 +1507,9 @@@ static int amdgpu_device_ip_early_init(
                        return -EAGAIN;
        }
  
-       adev->powerplay.pp_feature = amdgpu_pp_feature_mask;
+       adev->pm.pp_feature = amdgpu_pp_feature_mask;
+       if (amdgpu_sriov_vf(adev))
+               adev->pm.pp_feature &= ~PP_GFXOFF_MASK;
  
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
@@@ -1638,6 -1641,10 +1641,10 @@@ static int amdgpu_device_ip_init(struc
  {
        int i, r;
  
+       r = amdgpu_ras_init(adev);
+       if (r)
+               return r;
        for (i = 0; i < adev->num_ip_blocks; i++) {
                if (!adev->ip_blocks[i].status.valid)
                        continue;
                }
        }
  
+       r = amdgpu_ib_pool_init(adev);
+       if (r) {
+               dev_err(adev->dev, "IB initialization failed (%d).\n", r);
+               amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
+               goto init_failed;
+       }
        r = amdgpu_ucode_create_bo(adev); /* create ucode bo when sw_init complete*/
        if (r)
                goto init_failed;
@@@ -1869,6 -1883,8 +1883,8 @@@ static int amdgpu_device_ip_fini(struc
  {
        int i, r;
  
+       amdgpu_ras_pre_fini(adev);
        if (adev->gmc.xgmi.num_physical_nodes > 1)
                amdgpu_xgmi_remove_device(adev);
  
                        amdgpu_free_static_csa(&adev->virt.csa_obj);
                        amdgpu_device_wb_fini(adev);
                        amdgpu_device_vram_scratch_fini(adev);
+                       amdgpu_ib_pool_fini(adev);
                }
  
                r = adev->ip_blocks[i].version->funcs->sw_fini((void *)adev);
                adev->ip_blocks[i].status.late_initialized = false;
        }
  
+       amdgpu_ras_fini(adev);
        if (amdgpu_sriov_vf(adev))
                if (amdgpu_virt_release_full_gpu(adev, false))
                        DRM_ERROR("failed to release exclusive mode on fini\n");
@@@ -1999,6 -2018,10 +2018,10 @@@ static void amdgpu_device_ip_late_init_
        r = amdgpu_device_enable_mgpu_fan_boost();
        if (r)
                DRM_ERROR("enable mgpu fan boost failed (%d).\n", r);
+       /*set to low pstate by default */
+       amdgpu_xgmi_set_pstate(adev, 0);
  }
  
  static void amdgpu_device_delay_enable_gfx_off(struct work_struct *work)
@@@ -2369,7 -2392,7 +2392,7 @@@ static void amdgpu_device_xgmi_reset_fu
  
        adev->asic_reset_res =  amdgpu_asic_reset(adev);
        if (adev->asic_reset_res)
-               DRM_WARN("ASIC reset failed with err r, %d for drm dev, %s",
+               DRM_WARN("ASIC reset failed with error, %d for drm dev, %s",
                         adev->asic_reset_res, adev->ddev->unique);
  }
  
@@@ -2448,6 -2471,7 +2471,7 @@@ int amdgpu_device_init(struct amdgpu_de
        mutex_init(&adev->virt.vf_errors.lock);
        hash_init(adev->mn_hash);
        mutex_init(&adev->lock_reset);
+       mutex_init(&adev->virt.dpm_mutex);
  
        amdgpu_device_check_arguments(adev);
  
@@@ -2642,13 -2666,6 +2666,6 @@@ fence_driver_init
        /* Get a log2 for easy divisions. */
        adev->mm_stats.log2_max_MBps = ilog2(max(1u, max_MBps));
  
-       r = amdgpu_ib_pool_init(adev);
-       if (r) {
-               dev_err(adev->dev, "IB initialization failed (%d).\n", r);
-               amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_IB_INIT_FAIL, 0, r);
-               goto failed;
-       }
        amdgpu_fbdev_init(adev);
  
        r = amdgpu_pm_sysfs_init(adev);
                goto failed;
        }
  
+       /* must succeed. */
+       amdgpu_ras_post_init(adev);
        return 0;
  
  failed:
@@@ -2726,7 -2746,6 +2746,6 @@@ void amdgpu_device_fini(struct amdgpu_d
                else
                        drm_atomic_helper_shutdown(adev->ddev);
        }
-       amdgpu_ib_pool_fini(adev);
        amdgpu_fence_driver_fini(adev);
        amdgpu_pm_sysfs_fini(adev);
        amdgpu_fbdev_fini(adev);
@@@ -3165,7 -3184,6 +3184,7 @@@ static int amdgpu_device_recover_vram(s
  
                /* No need to recover an evicted BO */
                if (shadow->tbo.mem.mem_type != TTM_PL_TT ||
 +                  shadow->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET ||
                    shadow->parent->tbo.mem.mem_type != TTM_PL_VRAM)
                        continue;
  
@@@ -3225,6 -3243,8 +3244,8 @@@ static int amdgpu_device_reset_sriov(st
        if (r)
                return r;
  
+       amdgpu_amdkfd_pre_reset(adev);
        /* Resume IP prior to SMC */
        r = amdgpu_device_ip_reinit_early_sriov(adev);
        if (r)
  
        amdgpu_irq_gpu_reset_resume_helper(adev);
        r = amdgpu_ib_ring_tests(adev);
+       amdgpu_amdkfd_post_reset(adev);
  
  error:
        amdgpu_virt_init_data_exchange(adev);
@@@ -3376,7 -3397,7 +3398,7 @@@ static int amdgpu_do_asic_reset(struct 
                                r = amdgpu_asic_reset(tmp_adev);
  
                        if (r) {
-                               DRM_ERROR("ASIC reset failed with err r, %d for drm dev, %s",
+                               DRM_ERROR("ASIC reset failed with error, %d for drm dev, %s",
                                         r, tmp_adev->ddev->unique);
                                break;
                        }
                                                break;
                                }
                        }
+                       list_for_each_entry(tmp_adev, device_list_handle,
+                                       gmc.xgmi.head) {
+                               amdgpu_ras_reserve_bad_pages(tmp_adev);
+                       }
                }
        }
  
  
                                vram_lost = amdgpu_device_check_vram_lost(tmp_adev);
                                if (vram_lost) {
-                                       DRM_ERROR("VRAM is lost!\n");
+                                       DRM_INFO("VRAM is lost due to GPU reset!\n");
                                        atomic_inc(&tmp_adev->vram_lost_counter);
                                }
  
index 1696644ec022391d24b93df9f1dacd23079bd72e,1741056e6af661c832c97dab2c26a15e270a23d0..41a9a577962371727e5cd2c7ee81866e5a77e356
@@@ -163,7 -163,7 +163,7 @@@ static void mmhub_v1_0_init_cache_regs(
        /* XXX for emulation, Refer to closed source code.*/
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
                            0);
-       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 1);
+       tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
        WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL, tmp);
                tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3,
                                    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
        }
 +      WREG32_SOC15(MMHUB, 0, mmVM_L2_CNTL3, tmp);
  
        tmp = mmVM_L2_CNTL4_DEFAULT;
        tmp = REG_SET_FIELD(tmp, VM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
@@@ -256,7 -255,7 +256,7 @@@ static void mmhub_v1_0_setup_vmid_confi
                                    block_size);
                /* Send no-retry XNACK on fault to suppress VM fault storm. */
                tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
-                                   RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
+                                   RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 1);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_CNTL, i, tmp);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32, i*2, 0);
                WREG32_SOC15_OFFSET(MMHUB, 0, mmVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32, i*2, 0);
index a6cda201c964c5bc918e8d693c2aa2fccf65eb58,dda10b1f8574bffe42b154db6ac09517422da639..18c775a950cc3582261d176597d7ab0b9eff8383
@@@ -524,6 -524,14 +524,14 @@@ void dc_link_set_preferred_link_setting
        struct dc_stream_state *link_stream;
        struct dc_link_settings store_settings = *link_setting;
  
+       link->preferred_link_setting = store_settings;
+       /* Retrain with preferred link settings only relevant for
+        * DP signal type
+        */
+       if (!dc_is_dp_signal(link->connector_signal))
+               return;
        for (i = 0; i < MAX_PIPES; i++) {
                pipe = &dc->current_state->res_ctx.pipe_ctx[i];
                if (pipe->stream && pipe->stream->link) {
  
        link_stream = link->dc->current_state->res_ctx.pipe_ctx[i].stream;
  
-       link->preferred_link_setting = store_settings;
+       /* Cannot retrain link if backend is off */
+       if (link_stream->dpms_off)
+               return;
        if (link_stream)
                decide_link_settings(link_stream, &store_settings);
  
@@@ -573,6 -584,28 +584,28 @@@ void dc_link_set_test_pattern(struct dc
                        cust_pattern_size);
  }
  
+ uint32_t dc_link_bandwidth_kbps(
+       const struct dc_link *link,
+       const struct dc_link_settings *link_setting)
+ {
+       uint32_t link_bw_kbps = link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* bytes per sec */
+       link_bw_kbps *= 8;   /* 8 bits per byte*/
+       link_bw_kbps *= link_setting->lane_count;
+       return link_bw_kbps;
+ }
+ const struct dc_link_settings *dc_link_get_link_cap(
+               const struct dc_link *link)
+ {
+       if (link->preferred_link_setting.lane_count != LANE_COUNT_UNKNOWN &&
+                       link->preferred_link_setting.link_rate != LINK_RATE_UNKNOWN)
+               return &link->preferred_link_setting;
+       return &link->verified_link_cap;
+ }
  static void destruct(struct dc *dc)
  {
        dc_release_state(dc->current_state);
@@@ -621,6 -654,10 +654,10 @@@ static bool construct(struct dc *dc
  #endif
  
        enum dce_version dc_version = DCE_VERSION_UNKNOWN;
+       dc->config = init_params->flags;
+       memcpy(&dc->bb_overrides, &init_params->bb_overrides, sizeof(dc->bb_overrides));
        dc_dceip = kzalloc(sizeof(*dc_dceip), GFP_KERNEL);
        if (!dc_dceip) {
                dm_error("%s: failed to create dceip\n", __func__);
        dc_ctx->dc_stream_id_count = 0;
        dc->ctx = dc_ctx;
  
-       dc->current_state = dc_create_state();
-       if (!dc->current_state) {
-               dm_error("%s: failed to create validate ctx\n", __func__);
-               goto fail;
-       }
        /* Create logger */
  
        dc_ctx->dce_environment = init_params->dce_environment;
                goto fail;
        }
  
-       dc->res_pool = dc_create_resource_pool(
-                       dc,
-                       init_params->num_virtual_links,
-                       dc_version,
-                       init_params->asic_id);
+       dc->res_pool = dc_create_resource_pool(dc, init_params, dc_version);
        if (!dc->res_pool)
                goto fail;
  
+       /* Creation of current_state must occur after dc->dml
+        * is initialized in dc_create_resource_pool, because
+        * dc_create_state copies the contents of dc->dml on creation.
+        */
+       dc->current_state = dc_create_state(dc);
+       if (!dc->current_state) {
+               dm_error("%s: failed to create validate ctx\n", __func__);
+               goto fail;
+       }
        dc_resource_state_construct(dc, dc->current_state);
  
        if (!create_links(dc, init_params->num_virtual_links))
@@@ -746,7 -784,7 +784,7 @@@ fail
  static void disable_dangling_plane(struct dc *dc, struct dc_state *context)
  {
        int i, j;
-       struct dc_state *dangling_context = dc_create_state();
+       struct dc_state *dangling_context = dc_create_state(dc);
        struct dc_state *current_ctx;
  
        if (dangling_context == NULL)
@@@ -811,8 -849,6 +849,6 @@@ struct dc *dc_create(const struct dc_in
        if (dc->res_pool->dmcu != NULL)
                dc->versions.dmcu_version = dc->res_pool->dmcu->dmcu_version;
  
-       dc->config = init_params->flags;
        dc->build_id = DC_BUILD_ID;
  
        DC_LOG_DC("Display Core initialized\n");
@@@ -969,7 -1005,7 +1005,7 @@@ static bool context_changed
        return false;
  }
  
- bool dc_validate_seamless_boot_timing(struct dc *dc,
+ bool dc_validate_seamless_boot_timing(const struct dc *dc,
                                const struct dc_sink *sink,
                                struct dc_crtc_timing *crtc_timing)
  {
@@@ -1060,7 -1096,13 +1096,13 @@@ static enum dc_status dc_commit_state_n
        if (!dcb->funcs->is_accelerated_mode(dcb))
                dc->hwss.enable_accelerated_mode(dc, context);
  
-       dc->hwss.prepare_bandwidth(dc, context);
+       for (i = 0; i < context->stream_count; i++) {
+               if (context->streams[i]->apply_seamless_boot_optimization)
+                       dc->optimize_seamless_boot = true;
+       }
+       if (!dc->optimize_seamless_boot)
+               dc->hwss.prepare_bandwidth(dc, context);
  
        /* re-program planes for existing stream, in case we need to
         * free up plane resource for later use
  
        dc_enable_stereo(dc, context, dc_streams, context->stream_count);
  
-       /* pplib is notified if disp_num changed */
-       dc->hwss.optimize_bandwidth(dc, context);
+       if (!dc->optimize_seamless_boot)
+               /* pplib is notified if disp_num changed */
+               dc->hwss.optimize_bandwidth(dc, context);
  
        for (i = 0; i < context->stream_count; i++)
                context->streams[i]->mode_changed = false;
  
+       memset(&context->commit_hints, 0, sizeof(context->commit_hints));
        dc_release_state(dc->current_state);
  
        dc->current_state = context;
@@@ -1177,7 -1222,7 +1222,7 @@@ bool dc_post_update_surfaces_to_stream(
        int i;
        struct dc_state *context = dc->current_state;
  
-       if (dc->optimized_required == false)
+       if (!dc->optimized_required || dc->optimize_seamless_boot)
                return true;
  
        post_surface_trace(dc);
        return true;
  }
  
- struct dc_state *dc_create_state(void)
+ struct dc_state *dc_create_state(struct dc *dc)
  {
        struct dc_state *context = kzalloc(sizeof(struct dc_state),
                                           GFP_KERNEL);
  
        if (!context)
                return NULL;
+       /* Each context must have its own instance of VBA, and in order to
+        * initialize and obtain IP and SOC, the base DML instance from DC
+        * is initially copied into every context.
+        */
+ #ifdef CONFIG_DRM_AMD_DC_DCN1_0
+       memcpy(&context->bw_ctx.dml, &dc->dml, sizeof(struct display_mode_lib));
+ #endif
  
        kref_init(&context->refcount);
        return context;
  }
  
+ struct dc_state *dc_copy_state(struct dc_state *src_ctx)
+ {
+       int i, j;
+       struct dc_state *new_ctx = kzalloc(sizeof(struct dc_state),
+                                          GFP_KERNEL);
+       if (!new_ctx)
+               return NULL;
+       memcpy(new_ctx, src_ctx, sizeof(struct dc_state));
+       for (i = 0; i < MAX_PIPES; i++) {
+                       struct pipe_ctx *cur_pipe = &new_ctx->res_ctx.pipe_ctx[i];
+                       if (cur_pipe->top_pipe)
+                               cur_pipe->top_pipe =  &new_ctx->res_ctx.pipe_ctx[cur_pipe->top_pipe->pipe_idx];
+                       if (cur_pipe->bottom_pipe)
+                               cur_pipe->bottom_pipe = &new_ctx->res_ctx.pipe_ctx[cur_pipe->bottom_pipe->pipe_idx];
+       }
+       for (i = 0; i < new_ctx->stream_count; i++) {
+                       dc_stream_retain(new_ctx->streams[i]);
+                       for (j = 0; j < new_ctx->stream_status[i].plane_count; j++)
+                               dc_plane_state_retain(
+                                       new_ctx->stream_status[i].plane_states[j]);
+       }
+       kref_init(&new_ctx->refcount);
+       return new_ctx;
+ }
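
Note the deep-copy technique in dc_copy_state() above: after the flat memcpy, the top_pipe/bottom_pipe pointers still reference the source context's pipe array, so they are re-derived from the stored pipe_idx. A generic sketch of the same pattern, with hypothetical types:

    #include <string.h>

    struct node { struct node *peer; int idx; };
    struct ctx  { struct node nodes[4]; };

    static void ctx_copy(struct ctx *dst, const struct ctx *src)
    {
            int i;

            memcpy(dst, src, sizeof(*dst)); /* peer pointers still aim into src */
            for (i = 0; i < 4; i++)
                    if (dst->nodes[i].peer)
                            dst->nodes[i].peer =
                                    &dst->nodes[dst->nodes[i].peer->idx];
    }

The stream and plane-state reference counts are bumped for the same reason: the copy now holds independent pointers to shared objects.
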
  void dc_retain_state(struct dc_state *context)
  {
        kref_get(&context->refcount);
@@@ -1377,11 -1464,6 +1464,11 @@@ static enum surface_update_type det_sur
                return UPDATE_TYPE_FULL;
        }
  
 +      if (u->surface->force_full_update) {
 +              update_flags->bits.full_update = 1;
 +              return UPDATE_TYPE_FULL;
 +      }
 +
        type = get_plane_info_update_type(u);
        elevate_update_type(&overall_type, type);
  
@@@ -1666,6 -1748,7 +1753,7 @@@ static void commit_planes_do_stream_upd
                                continue;
  
                        if (stream_update->dpms_off) {
+                               dc->hwss.pipe_control_lock(dc, pipe_ctx, true);
                                if (*stream_update->dpms_off) {
                                        core_link_disable_stream(pipe_ctx, KEEP_ACQUIRED_RESOURCE);
                                        dc->hwss.optimize_bandwidth(dc, dc->current_state);
                                        dc->hwss.prepare_bandwidth(dc, dc->current_state);
                                        core_link_enable_stream(dc->current_state, pipe_ctx);
                                }
+                               dc->hwss.pipe_control_lock(dc, pipe_ctx, false);
                        }
  
                        if (stream_update->abm_level && pipe_ctx->stream_res.abm) {
@@@ -1700,7 -1784,16 +1789,16 @@@ static void commit_planes_for_stream(st
        int i, j;
        struct pipe_ctx *top_pipe_to_program = NULL;
  
-       if (update_type == UPDATE_TYPE_FULL) {
+       if (dc->optimize_seamless_boot && surface_count > 0) {
+               /* The optimize_seamless_boot flag keeps clocks and watermarks
+                * high until the first flip. After the first flip, optimization
+                * is required to lower bandwidth.
+                */
+               dc->optimize_seamless_boot = false;
+               dc->optimized_required = true;
+       }
+       if (update_type == UPDATE_TYPE_FULL && !dc->optimize_seamless_boot) {
                dc->hwss.prepare_bandwidth(dc, context);
                context_clock_trace(dc, context);
        }
@@@ -1800,21 -1893,13 +1898,21 @@@ void dc_commit_updates_for_stream(struc
        if (update_type >= UPDATE_TYPE_FULL) {
  
                /* initialize scratch memory for building context */
-               context = dc_create_state();
+               context = dc_create_state(dc);
                if (context == NULL) {
                        DC_ERROR("Failed to allocate new validate context!\n");
                        return;
                }
  
                dc_resource_state_copy_construct(state, context);
 +
 +              for (i = 0; i < dc->res_pool->pipe_count; i++) {
 +                      struct pipe_ctx *new_pipe = &context->res_ctx.pipe_ctx[i];
 +                      struct pipe_ctx *old_pipe = &dc->current_state->res_ctx.pipe_ctx[i];
 +
 +                      if (new_pipe->plane_state && new_pipe->plane_state != old_pipe->plane_state)
 +                              new_pipe->plane_state->force_full_update = true;
 +              }
        }
  
  
                dc->current_state = context;
                dc_release_state(old);
  
 +              for (i = 0; i < dc->res_pool->pipe_count; i++) {
 +                      struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
 +
 +                      if (pipe_ctx->plane_state && pipe_ctx->stream == stream)
 +                              pipe_ctx->plane_state->force_full_update = false;
 +              }
        }
        /*let's use current_state to update watermark etc*/
        if (update_type >= UPDATE_TYPE_FULL)
@@@ -2099,13 -2178,13 +2197,13 @@@ void dc_link_remove_remote_sink(struct 
  
  void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info)
  {
-       info->displayClock                              = (unsigned int)state->bw.dcn.clk.dispclk_khz;
-       info->engineClock                               = (unsigned int)state->bw.dcn.clk.dcfclk_khz;
-       info->memoryClock                               = (unsigned int)state->bw.dcn.clk.dramclk_khz;
-       info->maxSupportedDppClock              = (unsigned int)state->bw.dcn.clk.max_supported_dppclk_khz;
-       info->dppClock                                  = (unsigned int)state->bw.dcn.clk.dppclk_khz;
-       info->socClock                                  = (unsigned int)state->bw.dcn.clk.socclk_khz;
-       info->dcfClockDeepSleep                 = (unsigned int)state->bw.dcn.clk.dcfclk_deep_sleep_khz;
-       info->fClock                                    = (unsigned int)state->bw.dcn.clk.fclk_khz;
-       info->phyClock                                  = (unsigned int)state->bw.dcn.clk.phyclk_khz;
+       info->displayClock                              = (unsigned int)state->bw_ctx.bw.dcn.clk.dispclk_khz;
+       info->engineClock                               = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_khz;
+       info->memoryClock                               = (unsigned int)state->bw_ctx.bw.dcn.clk.dramclk_khz;
+       info->maxSupportedDppClock              = (unsigned int)state->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz;
+       info->dppClock                                  = (unsigned int)state->bw_ctx.bw.dcn.clk.dppclk_khz;
+       info->socClock                                  = (unsigned int)state->bw_ctx.bw.dcn.clk.socclk_khz;
+       info->dcfClockDeepSleep                 = (unsigned int)state->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz;
+       info->fClock                                    = (unsigned int)state->bw_ctx.bw.dcn.clk.fclk_khz;
+       info->phyClock                                  = (unsigned int)state->bw_ctx.bw.dcn.clk.phyclk_khz;
  }
index 0515095574e735e0535ee17ce3369168557c201e,70edd9ea5afeb33c4aec0df53e71e654fe813022..44e4b046558781fda3b63db42f12a65078ed16ed
  #include "inc/hw/dmcu.h"
  #include "dml/display_mode_lib.h"
  
- #define DC_VER "3.2.17"
+ #define DC_VER "3.2.27"
  
  #define MAX_SURFACES 3
+ #define MAX_PLANES 6
  #define MAX_STREAMS 6
  #define MAX_SINKS_PER_LINK 4
  
@@@ -53,6 -54,41 +54,41 @@@ struct dc_versions 
        struct dmcu_version dmcu_version;
  };
  
+ enum dc_plane_type {
+       DC_PLANE_TYPE_INVALID,
+       DC_PLANE_TYPE_DCE_RGB,
+       DC_PLANE_TYPE_DCE_UNDERLAY,
+       DC_PLANE_TYPE_DCN_UNIVERSAL,
+ };
+ 
+ struct dc_plane_cap {
+       enum dc_plane_type type;
+       uint32_t blends_with_above : 1;
+       uint32_t blends_with_below : 1;
+       uint32_t per_pixel_alpha : 1;
+       struct {
+               uint32_t argb8888 : 1;
+               uint32_t nv12 : 1;
+               uint32_t fp16 : 1;
+       } pixel_format_support;
+       // max upscaling factor x1000
+       // upscaling factors are always >= 1
+       // for example, 1080p -> 8K is 4.0, or 4000 raw value
+       struct {
+               uint32_t argb8888;
+               uint32_t nv12;
+               uint32_t fp16;
+       } max_upscale_factor;
+       // max downscale factor x1000
+       // downscale factors are always <= 1
+       // for example, 8K -> 1080p is 0.25, or 250 raw value
+       struct {
+               uint32_t argb8888;
+               uint32_t nv12;
+               uint32_t fp16;
+       } max_downscale_factor;
+ };
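
Because the factors above are fixed-point x1000, a consumer check reduces to one integer compare; a hedged sketch (the helper name is hypothetical, argb8888 chosen for brevity):

    static bool plane_scale_supported(const struct dc_plane_cap *cap,
                                      uint32_t src_w, uint32_t dst_w)
    {
            uint32_t factor = dst_w * 1000 / src_w; /* e.g. 1920 -> 3840 gives 2000, i.e. 2.0x */

            if (dst_w >= src_w) /* upscale: factor >= 1000 */
                    return factor <= cap->max_upscale_factor.argb8888;
            return factor >= cap->max_downscale_factor.argb8888; /* downscale: factor <= 1000 */
    }
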
  struct dc_caps {
        uint32_t max_streams;
        uint32_t max_links;
        bool force_dp_tps4_for_cp2520;
        bool disable_dp_clk_share;
        bool psp_setup_panel_mode;
+       struct dc_plane_cap planes[MAX_PLANES];
  };
  
  struct dc_dcc_surface_param {
@@@ -164,6 -201,10 +201,10 @@@ struct dc_config 
        bool gpu_vm_support;
        bool disable_disp_pll_sharing;
        bool fbc_support;
+       bool optimize_edp_link_rate;
+       bool disable_fractional_pwm;
+       bool allow_seamless_boot_optimization;
+       bool power_down_display_on_boot;
  };
  
  enum visual_confirm {
@@@ -203,7 -244,59 +244,59 @@@ struct dc_clocks 
        int fclk_khz;
        int phyclk_khz;
        int dramclk_khz;
- };
+       bool p_state_change_support;
+ };
+ 
+ struct dc_bw_validation_profile {
+       bool enable;
+       unsigned long long total_ticks;
+       unsigned long long voltage_level_ticks;
+       unsigned long long watermark_ticks;
+       unsigned long long rq_dlg_ticks;
+       unsigned long long total_count;
+       unsigned long long skip_fast_count;
+       unsigned long long skip_pass_count;
+       unsigned long long skip_fail_count;
+ };
+ 
+ #define BW_VAL_TRACE_SETUP() \
+               unsigned long long end_tick = 0; \
+               unsigned long long voltage_level_tick = 0; \
+               unsigned long long watermark_tick = 0; \
+               unsigned long long start_tick = dc->debug.bw_val_profile.enable ? \
+                               dm_get_timestamp(dc->ctx) : 0
+ 
+ #define BW_VAL_TRACE_COUNT() \
+               if (dc->debug.bw_val_profile.enable) \
+                       dc->debug.bw_val_profile.total_count++
+ 
+ #define BW_VAL_TRACE_SKIP(status) \
+               if (dc->debug.bw_val_profile.enable) { \
+                       if (!voltage_level_tick) \
+                               voltage_level_tick = dm_get_timestamp(dc->ctx); \
+                       dc->debug.bw_val_profile.skip_ ## status ## _count++; \
+               }
+ 
+ #define BW_VAL_TRACE_END_VOLTAGE_LEVEL() \
+               if (dc->debug.bw_val_profile.enable) \
+                       voltage_level_tick = dm_get_timestamp(dc->ctx)
+ 
+ #define BW_VAL_TRACE_END_WATERMARKS() \
+               if (dc->debug.bw_val_profile.enable) \
+                       watermark_tick = dm_get_timestamp(dc->ctx)
+ 
+ #define BW_VAL_TRACE_FINISH() \
+               if (dc->debug.bw_val_profile.enable) { \
+                       end_tick = dm_get_timestamp(dc->ctx); \
+                       dc->debug.bw_val_profile.total_ticks += end_tick - start_tick; \
+                       dc->debug.bw_val_profile.voltage_level_ticks += voltage_level_tick - start_tick; \
+                       if (watermark_tick) { \
+                               dc->debug.bw_val_profile.watermark_ticks += watermark_tick - voltage_level_tick; \
+                               dc->debug.bw_val_profile.rq_dlg_ticks += end_tick - watermark_tick; \
+                       } \
+               }
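
The macros above are meant to be dropped into a bandwidth-validation function in matched pairs, so each phase's tick delta lands in its own counter; a hypothetical usage sketch (both called helpers are stand-ins, not real DC functions):

    static bool validate_bandwidth_sketch(struct dc *dc, struct dc_state *ctx)
    {
            bool ok = false;

            BW_VAL_TRACE_SETUP();

            BW_VAL_TRACE_COUNT();
            if (!voltage_level_ok(dc, ctx)) {       /* stand-in helper */
                    BW_VAL_TRACE_SKIP(fail);        /* bumps skip_fail_count */
                    goto out;
            }
            BW_VAL_TRACE_END_VOLTAGE_LEVEL();

            compute_watermarks(dc, ctx);            /* stand-in helper */
            BW_VAL_TRACE_END_WATERMARKS();
            ok = true;
    out:
            BW_VAL_TRACE_FINISH();                  /* accumulates the per-phase ticks */
            return ok;
    }
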
  
  struct dc_debug_options {
        enum visual_confirm visual_confirm;
        bool skip_detection_link_training;
        unsigned int force_odm_combine; //bit vector based on otg inst
        unsigned int force_fclk_khz;
+       bool disable_tri_buf;
+       struct dc_bw_validation_profile bw_val_profile;
  };
  
  struct dc_debug_data {
        uint32_t auxErrorCount;
  };
  
+ struct dc_bounding_box_overrides {
+       int sr_exit_time_ns;
+       int sr_enter_plus_exit_time_ns;
+       int urgent_latency_ns;
+       int percent_of_ideal_drambw;
+       int dram_clock_change_latency_ns;
+ };
+ 
  struct dc_state;
  struct resource_pool;
  struct dce_hwseq;
@@@ -274,6 -377,7 +377,7 @@@ struct dc 
        struct dc_cap_funcs cap_funcs;
        struct dc_config config;
        struct dc_debug_options debug;
+       struct dc_bounding_box_overrides bb_overrides;
        struct dc_context *ctx;
  
        uint8_t link_count;
        struct hw_sequencer_funcs hwss;
        struct dce_hwseq *hwseq;
  
+       /* Required to optimize clocks and bandwidth for added/removed planes */
        bool optimized_required;
  
+       /* Required to maintain clocks and bandwidth for UEFI-enabled HW */
+       bool optimize_seamless_boot;
+ 
        /* FBC compressor */
        struct compressor *fbc_compressor;
  
@@@ -327,6 -435,7 +435,7 @@@ struct dc_init_data 
        struct hw_asic_id asic_id;
        void *driver; /* ctx */
        struct cgs_device *cgs_device;
+       struct dc_bounding_box_overrides bb_overrides;
  
        int num_virtual_links;
        /*
@@@ -503,9 -612,6 +612,9 @@@ struct dc_plane_state 
        struct dc_plane_status status;
        struct dc_context *ctx;
  
 +      /* HACK: Workaround for forcing full reprogramming under some conditions */
 +      bool force_full_update;
 +
        /* private to dc_surface.c */
        enum dc_irq_source irq_source;
        struct kref refcount;
@@@ -597,7 -703,7 +706,7 @@@ struct dc_validation_set 
        uint8_t plane_count;
  };
  
- bool dc_validate_seamless_boot_timing(struct dc *dc,
+ bool dc_validate_seamless_boot_timing(const struct dc *dc,
                                const struct dc_sink *sink,
                                struct dc_crtc_timing *crtc_timing);
  
@@@ -605,9 -711,14 +714,14 @@@ enum dc_status dc_validate_plane(struc
  
  void get_clock_requirements_for_state(struct dc_state *state, struct AsicStateEx *info);
  
+ /*
+  * fast_validate: we return after determining if we can support the new state,
+  * but before we populate the programming info
+  */
  enum dc_status dc_validate_global_state(
                struct dc *dc,
-               struct dc_state *new_ctx);
+               struct dc_state *new_ctx,
+               bool fast_validate);
  
  
  void dc_resource_state_construct(
@@@ -636,7 -747,8 +750,8 @@@ void dc_resource_state_destruct(struct 
  bool dc_commit_state(struct dc *dc, struct dc_state *context);
  
  
- struct dc_state *dc_create_state(void);
+ struct dc_state *dc_create_state(struct dc *dc);
+ struct dc_state *dc_copy_state(struct dc_state *src_ctx);
  void dc_retain_state(struct dc_state *context);
  void dc_release_state(struct dc_state *context);
  
@@@ -648,9 -760,16 +763,16 @@@ struct dpcd_caps 
        union dpcd_rev dpcd_rev;
        union max_lane_count max_ln_count;
        union max_down_spread max_down_spread;
+       union dprx_feature dprx_feature;
+       /* valid only for eDP v1.4 or higher */
+       uint8_t edp_supported_link_rates_count;
+       enum dc_link_rate edp_supported_link_rates[8];
  
        /* dongle type (DP converter, CV smart dongle) */
        enum display_dongle_type dongle_type;
+       /* branch device or sink device */
+       bool is_branch_dev;
        /* Dongle's downstream count. */
        union sink_count sink_count;
        /* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER,
        int8_t branch_dev_name[6];
        int8_t branch_hw_revision;
        int8_t branch_fw_revision[2];
-       uint8_t link_rate_set;
  
        bool allow_invalid_MSA_timing_param;
        bool panel_mode_edp;
        bool dpcd_display_control_capable;
+       bool ext_receiver_cap_field_present;
  };
  
  #include "dc_link.h"
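
The new edp_supported_link_rates table mirrors the eDP 1.4 SUPPORTED_LINK_RATES DPCD registers; a hedged sketch of the standard decode (DPCD 0x010..0x01F holds up to eight little-endian u16 entries giving the per-lane bit rate in 200 kHz units, zero-terminated):

    /* buf[] holds 16 bytes read from DPCD address 0x010 */
    int i;

    for (i = 0; i < 8; i++) {
            uint16_t rate = buf[i * 2] | (buf[i * 2 + 1] << 8);

            if (rate == 0) /* table is zero-terminated */
                    break;
            link_rate_khz[i] = rate * 200; /* e.g. 27000 -> 5400000 kHz (HBR2) */
    }
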
index 4fe3664fb49508e7f9c07ddc69f5b610fd884d1d,937b5cffd7efaf3b9ea3b1410394c5e498d2d4bf..bd33c47183fc800c6621dbf7ebf6cf71a0581015
@@@ -171,31 -171,25 +171,31 @@@ static void submit_channel_request
                 (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT)));
        if (REG(AUXN_IMPCAL)) {
                /* clear_aux_error */
-               REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
-                               1,
-                               0);
+               REG_UPDATE_SEQ_2(AUXN_IMPCAL,
+                               AUXN_CALOUT_ERROR_AK, 1,
+                               AUXN_CALOUT_ERROR_AK, 0);
  
-               REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
-                               1,
-                               0);
+               REG_UPDATE_SEQ_2(AUXP_IMPCAL,
+                               AUXP_CALOUT_ERROR_AK, 1,
+                               AUXP_CALOUT_ERROR_AK, 0);
  
                /* force_default_calibrate */
-               REG_UPDATE_1BY1_2(AUXN_IMPCAL,
+               REG_UPDATE_SEQ_2(AUXN_IMPCAL,
                                AUXN_IMPCAL_ENABLE, 1,
                                AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
  
                /* bug? why does AUXN update EN and OVERRIDE_EN one by one while AUXP toggles OVERRIDE? */
  
-               REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
-                               1,
-                               0);
+               REG_UPDATE_SEQ_2(AUXP_IMPCAL,
+                               AUXP_IMPCAL_OVERRIDE_ENABLE, 1,
+                               AUXP_IMPCAL_OVERRIDE_ENABLE, 0);
        }
 +
 +      REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
 +
 +      REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
 +                              10, aux110->timeout_period/10);
 +
        /* set the delay and the number of bytes to write */
  
        /* The length include
                }
        }
  
 -      REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
 -      REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
 -                              10, aux110->timeout_period/10);
        REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
  }
  
@@@ -270,7 -267,7 +270,7 @@@ static int read_channel_reply(struct dc
        if (!bytes_replied)
                return -1;
  
-       REG_UPDATE_1BY1_3(AUX_SW_DATA,
+       REG_UPDATE_SEQ_3(AUX_SW_DATA,
                          AUX_SW_INDEX, 0,
                          AUX_SW_AUTOINCREMENT_DISABLE, 1,
                          AUX_SW_DATA_RW, 1);
@@@ -320,9 -317,10 +320,10 @@@ static enum aux_channel_operation_resul
        *returned_bytes = 0;
  
        /* poll to make sure that SW_DONE is asserted */
-       value = REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
+       REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
                                10, aux110->timeout_period/10);
  
+       value = REG_READ(AUX_SW_STATUS);
        /* in case HPD is LOW, exit AUX transaction */
        if ((value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
                return AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
@@@ -377,7 -375,6 +378,6 @@@ static bool acquire
        struct dce_aux *engine,
        struct ddc *ddc)
  {
        enum gpio_result result;
  
        if (!is_engine_available(engine))
@@@ -442,12 -439,12 +442,12 @@@ static enum i2caux_transaction_action i
        return I2CAUX_TRANSACTION_ACTION_DP_READ;
  }
  
- int dce_aux_transfer(struct ddc_service *ddc,
-               struct aux_payload *payload)
+ int dce_aux_transfer_raw(struct ddc_service *ddc,
+               struct aux_payload *payload,
+               enum aux_channel_operation_result *operation_result)
  {
        struct ddc *ddc_pin = ddc->ddc_pin;
        struct dce_aux *aux_engine;
-       enum aux_channel_operation_result operation_result;
        struct aux_request_transaction_data aux_req;
        struct aux_reply_transaction_data aux_rep;
        uint8_t returned_bytes = 0;
        memset(&aux_rep, 0, sizeof(aux_rep));
  
        aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
-       acquire(aux_engine, ddc_pin);
+       if (!acquire(aux_engine, ddc_pin))
+               return -1;
  
        if (payload->i2c_over_aux)
                aux_req.type = AUX_TRANSACTION_TYPE_I2C;
        aux_req.data = payload->data;
  
        submit_channel_request(aux_engine, &aux_req);
-       operation_result = get_channel_status(aux_engine, &returned_bytes);
-       switch (operation_result) {
-       case AUX_CHANNEL_OPERATION_SUCCEEDED:
-               res = read_channel_reply(aux_engine, payload->length,
-                                                       payload->data, payload->reply,
-                                                       &status);
-               break;
-       case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
-               res = 0;
-               break;
-       case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
-       case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
-       case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+       *operation_result = get_channel_status(aux_engine, &returned_bytes);
+       if (*operation_result == AUX_CHANNEL_OPERATION_SUCCEEDED) {
+               read_channel_reply(aux_engine, payload->length,
+                                        payload->data, payload->reply,
+                                        &status);
+               res = returned_bytes;
+       } else {
                res = -1;
-               break;
        }
        release_engine(aux_engine);
        return res;
  }
  
- #define AUX_RETRY_MAX 7
+ #define AUX_MAX_RETRIES 7
+ #define AUX_MAX_DEFER_RETRIES 7
+ #define AUX_MAX_I2C_DEFER_RETRIES 7
+ #define AUX_MAX_INVALID_REPLY_RETRIES 2
+ #define AUX_MAX_TIMEOUT_RETRIES 3
  
  bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
                struct aux_payload *payload)
        int i, ret = 0;
        uint8_t reply;
        bool payload_reply = true;
+       enum aux_channel_operation_result operation_result;
+       int aux_ack_retries = 0,
+               aux_defer_retries = 0,
+               aux_i2c_defer_retries = 0,
+               aux_timeout_retries = 0,
+               aux_invalid_reply_retries = 0;
  
        if (!payload->reply) {
                payload_reply = false;
                payload->reply = &reply;
        }
  
-       for (i = 0; i < AUX_RETRY_MAX; i++) {
-               ret = dce_aux_transfer(ddc, payload);
-               if (ret >= 0) {
-                       if (*payload->reply == 0) {
-                               if (!payload_reply)
-                                       payload->reply = NULL;
-                               return true;
+       for (i = 0; i < AUX_MAX_RETRIES; i++) {
+               ret = dce_aux_transfer_raw(ddc, payload, &operation_result);
+               switch (operation_result) {
+               case AUX_CHANNEL_OPERATION_SUCCEEDED:
+                       aux_timeout_retries = 0;
+                       aux_invalid_reply_retries = 0;
+                       switch (*payload->reply) {
+                       case AUX_TRANSACTION_REPLY_AUX_ACK:
+                               if (!payload->write && payload->length != ret) {
+                                       if (++aux_ack_retries >= AUX_MAX_RETRIES)
+                                               goto fail;
+                                       else
+                                               udelay(300);
+                               } else
+                                       return true;
+                               break;
+                       case AUX_TRANSACTION_REPLY_AUX_DEFER:
+                       case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_NACK:
+                       case AUX_TRANSACTION_REPLY_I2C_OVER_AUX_DEFER:
+                               if (++aux_defer_retries >= AUX_MAX_DEFER_RETRIES)
+                                       goto fail;
+                               break;
+                       case AUX_TRANSACTION_REPLY_I2C_DEFER:
+                               aux_defer_retries = 0;
+                               if (++aux_i2c_defer_retries >= AUX_MAX_I2C_DEFER_RETRIES)
+                                       goto fail;
+                               break;
+                       case AUX_TRANSACTION_REPLY_AUX_NACK:
+                       case AUX_TRANSACTION_REPLY_HPD_DISCON:
+                       default:
+                               goto fail;
                        }
-               }
+                       break;
+               case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+                       if (++aux_invalid_reply_retries >= AUX_MAX_INVALID_REPLY_RETRIES)
+                               goto fail;
+                       else
+                               udelay(400);
+                       break;
+               case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+                       if (++aux_timeout_retries >= AUX_MAX_TIMEOUT_RETRIES)
+                               goto fail;
+                       else {
+                               /*
+                                * DP 1.4, 2.8.2: AUX Transaction Response/Reply Timeouts
+                                * According to the DP spec there should be 3 retries total
+                                * with a 400us wait in between each. Hardware already waits
+                                * for 550us, therefore no wait is required here.
+                                */
+                       }
+                       break;
  
-               udelay(1000);
+               case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+               case AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN:
+               default:
+                       goto fail;
+               }
        }
+ fail:
+       if (!payload_reply)
+               payload->reply = NULL;
        return false;
  }
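
A usage sketch of the retrying entry point, reading the DPCD revision byte; only aux_payload fields already referenced in this file are used, and the zeroed remainder of the struct is an assumption:

    uint8_t dpcd_rev = 0;
    struct aux_payload payload = {
            .i2c_over_aux = false, /* native AUX, i.e. DPCD access */
            .write = false,        /* read transaction */
            .address = 0x00000,    /* DP_DPCD_REV */
            .length = 1,
            .data = &dpcd_rev,
            .reply = NULL,         /* helper supplies scratch reply storage */
    };

    if (!dce_aux_transfer_with_retries(ddc, &payload))
            DRM_DEBUG_KMS("AUX read failed after retries\n");
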
index e28ed6a00ff4236ffaef4346528dc1ecbb179543,aab5f0c34584c1587f344f1ee2bff3a9a98c2020..ce6a26d189b06fdabe399e812e4510fc5261a09a
@@@ -71,11 -71,11 +71,11 @@@ enum {     /* This is the timeout as define
         * at most within ~240usec. That means,
         * increasing this timeout will not affect normal operation,
         * and we'll timeout after
 -       * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 1600usec.
 +       * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 2400usec.
         * This timeout is especially important for
 -       * resume from S3 and CTS.
 +       * converters, resume from S3, and CTS.
         */
 -      SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4
 +      SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 6
  };
  
  struct dce_aux {
@@@ -123,8 -123,9 +123,9 @@@ bool dce110_aux_engine_acquire
        struct dce_aux *aux_engine,
        struct ddc *ddc);
  
- int dce_aux_transfer(struct ddc_service *ddc,
-               struct aux_payload *cmd);
+ int dce_aux_transfer_raw(struct ddc_service *ddc,
+               struct aux_payload *cmd,
+               enum aux_channel_operation_result *operation_result);
  
  bool dce_aux_transfer_with_retries(struct ddc_service *ddc,
                struct aux_payload *cmd);
index 8bdb4a3bd7bf130d7f2e8fc83dcf4c8eaeb8db74,361a01a08c1856532a29460e5e21e8a63cbaaf24..3d400905100be73047434f00fcafd7a58819d6c2
@@@ -61,6 -61,7 +61,7 @@@ struct syncobj_wait_entry 
        struct task_struct *task;
        struct dma_fence *fence;
        struct dma_fence_cb fence_cb;
+       u64    point;
  };
  
  static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
@@@ -95,6 -96,8 +96,8 @@@ EXPORT_SYMBOL(drm_syncobj_find)
  static void drm_syncobj_fence_add_wait(struct drm_syncobj *syncobj,
                                       struct syncobj_wait_entry *wait)
  {
+       struct dma_fence *fence;
        if (wait->fence)
                return;
  
         * have the lock, try one more time just to be sure we don't add a
         * callback when a fence has already been set.
         */
-       if (syncobj->fence)
-               wait->fence = dma_fence_get(
-                       rcu_dereference_protected(syncobj->fence, 1));
-       else
+       fence = dma_fence_get(rcu_dereference_protected(syncobj->fence, 1));
+       if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
+               dma_fence_put(fence);
                list_add_tail(&wait->node, &syncobj->cb_list);
+       } else if (!fence) {
+               wait->fence = dma_fence_get_stub();
+       } else {
+               wait->fence = fence;
+       }
        spin_unlock(&syncobj->lock);
  }
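
One subtlety in drm_syncobj_fence_add_wait() above, since the second !fence test can look dead: dma_fence_chain_find_seqno() returns 0 on success but may replace the fence pointer with NULL when the requested point has already signaled, so NULL must be re-tested after the call. Schematically:

    if (!fence || dma_fence_chain_find_seqno(&fence, point)) {
            /* no fence at all yet, or the point is ahead of the chain:
             * queue a waiter on cb_list */
    } else if (!fence) {
            /* find_seqno() succeeded but cleared the pointer: the point
             * already signaled, so the always-signaled stub fence is used */
    } else {
            /* got the fence backing the requested point */
    }
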
  
@@@ -122,6 -129,44 +129,44 @@@ static void drm_syncobj_remove_wait(str
        spin_unlock(&syncobj->lock);
  }
  
+ /**
+  * drm_syncobj_add_point - add new timeline point to the syncobj
+  * @syncobj: sync object to add the timeline point to
+  * @chain: chain node to use to add the point
+  * @fence: fence to encapsulate in the chain node
+  * @point: sequence number to use for the point
+  *
+  * Add the chain node as a new timeline point to the syncobj.
+  */
+ void drm_syncobj_add_point(struct drm_syncobj *syncobj,
+                          struct dma_fence_chain *chain,
+                          struct dma_fence *fence,
+                          uint64_t point)
+ {
+       struct syncobj_wait_entry *cur, *tmp;
+       struct dma_fence *prev;
+       dma_fence_get(fence);
+       spin_lock(&syncobj->lock);
+       prev = drm_syncobj_fence_get(syncobj);
+       /* You are adding an unordered point to the timeline, which could
+        * cause the payload returned from query_ioctl to be 0!
+        */
+       if (prev && prev->seqno >= point)
+               DRM_ERROR("You are adding an unordered point to the timeline!\n");
+       dma_fence_chain_init(chain, prev, fence, point);
+       rcu_assign_pointer(syncobj->fence, &chain->base);
+       list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
+               syncobj_wait_syncobj_func(syncobj, cur);
+       spin_unlock(&syncobj->lock);
+       /* Walk the chain once to trigger garbage collection */
+       dma_fence_chain_for_each(fence, prev);
+       dma_fence_put(prev);
+ }
+ EXPORT_SYMBOL(drm_syncobj_add_point);
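
A hedged caller-side sketch: drm_syncobj_add_point() cannot fail, so the chain node is preallocated, exactly as the transfer and timeline-signal paths later in this file do (job_fence and seqno are stand-ins):

    struct dma_fence_chain *chain;

    chain = kzalloc(sizeof(*chain), GFP_KERNEL);
    if (!chain)
            return -ENOMEM;
    /* consumes the chain node; takes its own reference on job_fence */
    drm_syncobj_add_point(syncobj, chain, job_fence, seqno);
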
  /**
   * drm_syncobj_replace_fence - replace fence in a sync object.
   * @syncobj: Sync object to replace fence in
@@@ -145,10 -190,8 +190,8 @@@ void drm_syncobj_replace_fence(struct d
        rcu_assign_pointer(syncobj->fence, fence);
  
        if (fence != old_fence) {
-               list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node) {
-                       list_del_init(&cur->node);
+               list_for_each_entry_safe(cur, tmp, &syncobj->cb_list, node)
                        syncobj_wait_syncobj_func(syncobj, cur);
-               }
        }
  
        spin_unlock(&syncobj->lock);
@@@ -171,6 -214,8 +214,8 @@@ static void drm_syncobj_assign_null_han
        dma_fence_put(fence);
  }
  
+ /* 5s default for wait submission */
+ #define DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT 5000000000ULL
+ 
  /**
   * drm_syncobj_find_fence - lookup and reference the fence in a sync object
   * @file_private: drm file private pointer
@@@ -191,16 -236,58 +236,58 @@@ int drm_syncobj_find_fence(struct drm_f
                           struct dma_fence **fence)
  {
        struct drm_syncobj *syncobj = drm_syncobj_find(file_private, handle);
-       int ret = 0;
+       struct syncobj_wait_entry wait;
+       u64 timeout = nsecs_to_jiffies64(DRM_SYNCOBJ_WAIT_FOR_SUBMIT_TIMEOUT);
+       int ret;
  
        if (!syncobj)
                return -ENOENT;
  
        *fence = drm_syncobj_fence_get(syncobj);
-       if (!*fence) {
+       drm_syncobj_put(syncobj);
+       if (*fence) {
+               ret = dma_fence_chain_find_seqno(fence, point);
+               if (!ret)
+                       return 0;
+               dma_fence_put(*fence);
+       } else {
                ret = -EINVAL;
        }
-       drm_syncobj_put(syncobj);
+       if (!(flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT))
+               return ret;
+       memset(&wait, 0, sizeof(wait));
+       wait.task = current;
+       wait.point = point;
+       drm_syncobj_fence_add_wait(syncobj, &wait);
+       do {
+               set_current_state(TASK_INTERRUPTIBLE);
+               if (wait.fence) {
+                       ret = 0;
+                       break;
+               }
+               if (timeout == 0) {
+                       ret = -ETIME;
+                       break;
+               }
+               if (signal_pending(current)) {
+                       ret = -ERESTARTSYS;
+                       break;
+               }
+               timeout = schedule_timeout(timeout);
+       } while (1);
+       __set_current_state(TASK_RUNNING);
+       *fence = wait.fence;
+       if (wait.node.next)
+               drm_syncobj_remove_wait(syncobj, &wait);
        return ret;
  }
  EXPORT_SYMBOL(drm_syncobj_find_fence);
@@@ -388,19 -475,20 +475,19 @@@ static int drm_syncobj_fd_to_handle(str
                                    int fd, u32 *handle)
  {
        struct drm_syncobj *syncobj;
 -      struct file *file;
 +      struct fd f = fdget(fd);
        int ret;
  
 -      file = fget(fd);
 -      if (!file)
 +      if (!f.file)
                return -EINVAL;
  
 -      if (file->f_op != &drm_syncobj_file_fops) {
 -              fput(file);
 +      if (f.file->f_op != &drm_syncobj_file_fops) {
 +              fdput(f);
                return -EINVAL;
        }
  
        /* take a reference to put in the idr */
 -      syncobj = file->private_data;
 +      syncobj = f.file->private_data;
        drm_syncobj_get(syncobj);
  
        idr_preload(GFP_KERNEL);
        } else
                drm_syncobj_put(syncobj);
  
 -      fput(file);
 +      fdput(f);
        return ret;
  }
  
@@@ -592,6 -680,80 +679,80 @@@ drm_syncobj_fd_to_handle_ioctl(struct d
                                        &args->handle);
  }
  
+ static int drm_syncobj_transfer_to_timeline(struct drm_file *file_private,
+                                           struct drm_syncobj_transfer *args)
+ {
+       struct drm_syncobj *timeline_syncobj = NULL;
+       struct dma_fence *fence;
+       struct dma_fence_chain *chain;
+       int ret;
+       timeline_syncobj = drm_syncobj_find(file_private, args->dst_handle);
+       if (!timeline_syncobj) {
+               return -ENOENT;
+       }
+       ret = drm_syncobj_find_fence(file_private, args->src_handle,
+                                    args->src_point, args->flags,
+                                    &fence);
+       if (ret)
+               goto err;
+       chain = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
+       if (!chain) {
+               ret = -ENOMEM;
+               goto err1;
+       }
+       drm_syncobj_add_point(timeline_syncobj, chain, fence, args->dst_point);
+ err1:
+       dma_fence_put(fence);
+ err:
+       drm_syncobj_put(timeline_syncobj);
+       return ret;
+ }
+ 
+ static int
+ drm_syncobj_transfer_to_binary(struct drm_file *file_private,
+                              struct drm_syncobj_transfer *args)
+ {
+       struct drm_syncobj *binary_syncobj = NULL;
+       struct dma_fence *fence;
+       int ret;
+       binary_syncobj = drm_syncobj_find(file_private, args->dst_handle);
+       if (!binary_syncobj)
+               return -ENOENT;
+       ret = drm_syncobj_find_fence(file_private, args->src_handle,
+                                    args->src_point, args->flags, &fence);
+       if (ret)
+               goto err;
+       drm_syncobj_replace_fence(binary_syncobj, fence);
+       dma_fence_put(fence);
+ err:
+       drm_syncobj_put(binary_syncobj);
+       return ret;
+ }
+ 
+ int
+ drm_syncobj_transfer_ioctl(struct drm_device *dev, void *data,
+                          struct drm_file *file_private)
+ {
+       struct drm_syncobj_transfer *args = data;
+       int ret;
+       if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
+               return -EOPNOTSUPP;
+       if (args->pad)
+               return -EINVAL;
+       if (args->dst_point)
+               ret = drm_syncobj_transfer_to_timeline(file_private, args);
+       else
+               ret = drm_syncobj_transfer_to_binary(file_private, args);
+       return ret;
+ }
+ 
  static void syncobj_wait_fence_func(struct dma_fence *fence,
                                    struct dma_fence_cb *cb)
  {
  static void syncobj_wait_syncobj_func(struct drm_syncobj *syncobj,
                                      struct syncobj_wait_entry *wait)
  {
+       struct dma_fence *fence;
        /* This happens inside the syncobj lock */
-       wait->fence = dma_fence_get(rcu_dereference_protected(syncobj->fence,
-                                                             lockdep_is_held(&syncobj->lock)));
+       fence = rcu_dereference_protected(syncobj->fence,
+                                         lockdep_is_held(&syncobj->lock));
+       dma_fence_get(fence);
+       if (!fence || dma_fence_chain_find_seqno(&fence, wait->point)) {
+               dma_fence_put(fence);
+               return;
+       } else if (!fence) {
+               wait->fence = dma_fence_get_stub();
+       } else {
+               wait->fence = fence;
+       }
        wake_up_process(wait->task);
+       list_del_init(&wait->node);
  }
  
  static signed long drm_syncobj_array_wait_timeout(struct drm_syncobj **syncobjs,
+                                                 void __user *user_points,
                                                  uint32_t count,
                                                  uint32_t flags,
                                                  signed long timeout,
  {
        struct syncobj_wait_entry *entries;
        struct dma_fence *fence;
+       uint64_t *points;
        uint32_t signaled_count, i;
  
-       entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
-       if (!entries)
+       points = kmalloc_array(count, sizeof(*points), GFP_KERNEL);
+       if (points == NULL)
                return -ENOMEM;
  
+       if (!user_points) {
+               memset(points, 0, count * sizeof(uint64_t));
+       } else if (copy_from_user(points, user_points,
+                                 sizeof(uint64_t) * count)) {
+               timeout = -EFAULT;
+               goto err_free_points;
+       }
+       entries = kcalloc(count, sizeof(*entries), GFP_KERNEL);
+       if (!entries) {
+               timeout = -ENOMEM;
+               goto err_free_points;
+       }
        /* Walk the list of sync objects and initialize entries.  We do
         * this up-front so that we can properly return -EINVAL if there is
         * a syncobj with a missing fence and then never have the chance of
         */
        signaled_count = 0;
        for (i = 0; i < count; ++i) {
+               struct dma_fence *fence;
                entries[i].task = current;
-               entries[i].fence = drm_syncobj_fence_get(syncobjs[i]);
-               if (!entries[i].fence) {
+               entries[i].point = points[i];
+               fence = drm_syncobj_fence_get(syncobjs[i]);
+               if (!fence || dma_fence_chain_find_seqno(&fence, points[i])) {
+                       dma_fence_put(fence);
                        if (flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT) {
                                continue;
                        } else {
                        }
                }
  
-               if (dma_fence_is_signaled(entries[i].fence)) {
+               if (fence)
+                       entries[i].fence = fence;
+               else
+                       entries[i].fence = dma_fence_get_stub();
+               if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
+                   dma_fence_is_signaled(entries[i].fence)) {
                        if (signaled_count == 0 && idx)
                                *idx = i;
                        signaled_count++;
                        if (!fence)
                                continue;
  
-                       if (dma_fence_is_signaled(fence) ||
+                       if ((flags & DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE) ||
+                           dma_fence_is_signaled(fence) ||
                            (!entries[i].fence_cb.func &&
                             dma_fence_add_callback(fence,
                                                    &entries[i].fence_cb,
@@@ -720,6 -922,9 +921,9 @@@ cleanup_entries
        }
        kfree(entries);
  
+ err_free_points:
+       kfree(points);
        return timeout;
  }
  
   *
   * Calculate the timeout in jiffies from an absolute time in sec/nsec.
   */
- static signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
+ signed long drm_timeout_abs_to_jiffies(int64_t timeout_nsec)
  {
        ktime_t abs_timeout, now;
        u64 timeout_ns, timeout_jiffies64;
  
        return timeout_jiffies64 + 1;
  }
+ EXPORT_SYMBOL(drm_timeout_abs_to_jiffies);
  
  static int drm_syncobj_array_wait(struct drm_device *dev,
                                  struct drm_file *file_private,
                                  struct drm_syncobj_wait *wait,
-                                 struct drm_syncobj **syncobjs)
+                                 struct drm_syncobj_timeline_wait *timeline_wait,
+                                 struct drm_syncobj **syncobjs, bool timeline)
  {
-       signed long timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
+       signed long timeout = 0;
        uint32_t first = ~0;
  
-       timeout = drm_syncobj_array_wait_timeout(syncobjs,
-                                                wait->count_handles,
-                                                wait->flags,
-                                                timeout, &first);
-       if (timeout < 0)
-               return timeout;
-       wait->first_signaled = first;
+       if (!timeline) {
+               timeout = drm_timeout_abs_to_jiffies(wait->timeout_nsec);
+               timeout = drm_syncobj_array_wait_timeout(syncobjs,
+                                                        NULL,
+                                                        wait->count_handles,
+                                                        wait->flags,
+                                                        timeout, &first);
+               if (timeout < 0)
+                       return timeout;
+               wait->first_signaled = first;
+       } else {
+               timeout = drm_timeout_abs_to_jiffies(timeline_wait->timeout_nsec);
+               timeout = drm_syncobj_array_wait_timeout(syncobjs,
+                                                        u64_to_user_ptr(timeline_wait->points),
+                                                        timeline_wait->count_handles,
+                                                        timeline_wait->flags,
+                                                        timeout, &first);
+               if (timeout < 0)
+                       return timeout;
+               timeline_wait->first_signaled = first;
+       }
        return 0;
  }
  
@@@ -856,13 -1076,48 +1075,48 @@@ drm_syncobj_wait_ioctl(struct drm_devic
                return ret;
  
        ret = drm_syncobj_array_wait(dev, file_private,
-                                    args, syncobjs);
+                                    args, NULL, syncobjs, false);
+       drm_syncobj_array_free(syncobjs, args->count_handles);
+       return ret;
+ }
+ int
+ drm_syncobj_timeline_wait_ioctl(struct drm_device *dev, void *data,
+                               struct drm_file *file_private)
+ {
+       struct drm_syncobj_timeline_wait *args = data;
+       struct drm_syncobj **syncobjs;
+       int ret = 0;
+       if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
+               return -EOPNOTSUPP;
+       if (args->flags & ~(DRM_SYNCOBJ_WAIT_FLAGS_WAIT_ALL |
+                           DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
+                           DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE))
+               return -EINVAL;
+       if (args->count_handles == 0)
+               return -EINVAL;
+       ret = drm_syncobj_array_find(file_private,
+                                    u64_to_user_ptr(args->handles),
+                                    args->count_handles,
+                                    &syncobjs);
+       if (ret < 0)
+               return ret;
+       ret = drm_syncobj_array_wait(dev, file_private,
+                                    NULL, args, syncobjs, true);
  
        drm_syncobj_array_free(syncobjs, args->count_handles);
  
        return ret;
  }
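
From userspace the timeline wait maps onto the new ioctl; a hedged libdrm-style sketch. WAIT_FOR_SUBMIT blocks until a fence exists for each point, and adding the new WAIT_AVAILABLE flag returns as soon as the fences materialize instead of waiting for them to signal:

    /* handles[], points[], n, deadline_ns and fd are set up by the caller */
    struct drm_syncobj_timeline_wait wait = {
            .handles = (uint64_t)(uintptr_t)handles,
            .points = (uint64_t)(uintptr_t)points,
            .timeout_nsec = deadline_ns, /* absolute time, see drm_timeout_abs_to_jiffies() */
            .count_handles = n,
            .flags = DRM_SYNCOBJ_WAIT_FLAGS_WAIT_FOR_SUBMIT |
                     DRM_SYNCOBJ_WAIT_FLAGS_WAIT_AVAILABLE,
    };

    int ret = drmIoctl(fd, DRM_IOCTL_SYNCOBJ_TIMELINE_WAIT, &wait);

On success, first_signaled reports which handle satisfied the wait first.
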
  
  int
  drm_syncobj_reset_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_private)
@@@ -928,3 -1183,138 +1182,138 @@@ drm_syncobj_signal_ioctl(struct drm_dev
  
        return ret;
  }
+ 
+ int
+ drm_syncobj_timeline_signal_ioctl(struct drm_device *dev, void *data,
+                                 struct drm_file *file_private)
+ {
+       struct drm_syncobj_timeline_array *args = data;
+       struct drm_syncobj **syncobjs;
+       struct dma_fence_chain **chains;
+       uint64_t *points;
+       uint32_t i, j;
+       int ret;
+       if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
+               return -EOPNOTSUPP;
+       if (args->pad != 0)
+               return -EINVAL;
+       if (args->count_handles == 0)
+               return -EINVAL;
+       ret = drm_syncobj_array_find(file_private,
+                                    u64_to_user_ptr(args->handles),
+                                    args->count_handles,
+                                    &syncobjs);
+       if (ret < 0)
+               return ret;
+       points = kmalloc_array(args->count_handles, sizeof(*points),
+                              GFP_KERNEL);
+       if (!points) {
+               ret = -ENOMEM;
+               goto out;
+       }
+       if (!u64_to_user_ptr(args->points)) {
+               memset(points, 0, args->count_handles * sizeof(uint64_t));
+       } else if (copy_from_user(points, u64_to_user_ptr(args->points),
+                                 sizeof(uint64_t) * args->count_handles)) {
+               ret = -EFAULT;
+               goto err_points;
+       }
+       chains = kmalloc_array(args->count_handles, sizeof(void *), GFP_KERNEL);
+       if (!chains) {
+               ret = -ENOMEM;
+               goto err_points;
+       }
+       for (i = 0; i < args->count_handles; i++) {
+               chains[i] = kzalloc(sizeof(struct dma_fence_chain), GFP_KERNEL);
+               if (!chains[i]) {
+                       for (j = 0; j < i; j++)
+                               kfree(chains[j]);
+                       ret = -ENOMEM;
+                       goto err_chains;
+               }
+       }
+       for (i = 0; i < args->count_handles; i++) {
+               struct dma_fence *fence = dma_fence_get_stub();
+               drm_syncobj_add_point(syncobjs[i], chains[i],
+                                     fence, points[i]);
+               dma_fence_put(fence);
+       }
+ err_chains:
+       kfree(chains);
+ err_points:
+       kfree(points);
+ out:
+       drm_syncobj_array_free(syncobjs, args->count_handles);
+       return ret;
+ }
+ 
+ int drm_syncobj_query_ioctl(struct drm_device *dev, void *data,
+                           struct drm_file *file_private)
+ {
+       struct drm_syncobj_timeline_array *args = data;
+       struct drm_syncobj **syncobjs;
+       uint64_t __user *points = u64_to_user_ptr(args->points);
+       uint32_t i;
+       int ret;
+       if (!drm_core_check_feature(dev, DRIVER_SYNCOBJ_TIMELINE))
+               return -EOPNOTSUPP;
+       if (args->pad != 0)
+               return -EINVAL;
+       if (args->count_handles == 0)
+               return -EINVAL;
+       ret = drm_syncobj_array_find(file_private,
+                                    u64_to_user_ptr(args->handles),
+                                    args->count_handles,
+                                    &syncobjs);
+       if (ret < 0)
+               return ret;
+       for (i = 0; i < args->count_handles; i++) {
+               struct dma_fence_chain *chain;
+               struct dma_fence *fence;
+               uint64_t point;
+               fence = drm_syncobj_fence_get(syncobjs[i]);
+               chain = to_dma_fence_chain(fence);
+               if (chain) {
+                       struct dma_fence *iter, *last_signaled = NULL;
+                       dma_fence_chain_for_each(iter, fence) {
+                               if (!iter)
+                                       break;
+                               dma_fence_put(last_signaled);
+                               last_signaled = dma_fence_get(iter);
+                               if (!to_dma_fence_chain(last_signaled)->prev_seqno)
+                                       /* It is most likely that the
+                                        * timeline has unordered points.
+                                        */
+                                       break;
+                       }
+                       point = dma_fence_is_signaled(last_signaled) ?
+                               last_signaled->seqno :
+                               to_dma_fence_chain(last_signaled)->prev_seqno;
+                       dma_fence_put(last_signaled);
+               } else {
+                       point = 0;
+               }
+               ret = copy_to_user(&points[i], &point, sizeof(uint64_t));
+               ret = ret ? -EFAULT : 0;
+               if (ret)
+                       break;
+       }
+       drm_syncobj_array_free(syncobjs, args->count_handles);
+       return ret;
+ }
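
The matching userspace side of the query ioctl reads back one payload per handle; a hedged sketch (handle and fd are set up by the caller):

    uint64_t point = 0;
    struct drm_syncobj_timeline_array args = {
            .handles = (uint64_t)(uintptr_t)&handle, /* one timeline syncobj handle */
            .points = (uint64_t)(uintptr_t)&point,
            .count_handles = 1,
    };

    if (drmIoctl(fd, DRM_IOCTL_SYNCOBJ_QUERY, &args) == 0)
            printf("last signaled point: %llu\n", (unsigned long long)point);
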
index 3c724cc949a5fbb41e2f702e69f1c5d6767e1fe2,db88ce4d5509b09bab0f7191502df190936283f6..ad01c92aaf74881aae3cff9740359be36063732c
@@@ -42,6 -42,7 +42,7 @@@
  #include "i915_drv.h"
  #include "i915_gem_clflush.h"
  #include "i915_gemfs.h"
+ #include "i915_globals.h"
  #include "i915_reset.h"
  #include "i915_trace.h"
  #include "i915_vgpu.h"
@@@ -49,6 -50,7 +50,7 @@@
  #include "intel_drv.h"
  #include "intel_frontbuffer.h"
  #include "intel_mocs.h"
+ #include "intel_pm.h"
  #include "intel_workarounds.h"
  
  static void i915_gem_flush_free_objects(struct drm_i915_private *i915);
@@@ -100,48 -102,7 +102,7 @@@ static void i915_gem_info_remove_obj(st
        spin_unlock(&dev_priv->mm.object_stat_lock);
  }
  
- static int
- i915_gem_wait_for_error(struct i915_gpu_error *error)
- {
-       int ret;
-       might_sleep();
-       /*
-        * Only wait 10 seconds for the gpu reset to complete to avoid hanging
-        * userspace. If it takes that long something really bad is going on and
-        * we should simply try to bail out and fail as gracefully as possible.
-        */
-       ret = wait_event_interruptible_timeout(error->reset_queue,
-                                              !i915_reset_backoff(error),
-                                              I915_RESET_TIMEOUT);
-       if (ret == 0) {
-               DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
-               return -EIO;
-       } else if (ret < 0) {
-               return ret;
-       } else {
-               return 0;
-       }
- }
- int i915_mutex_lock_interruptible(struct drm_device *dev)
- {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       int ret;
-       ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
-       if (ret)
-               return ret;
-       ret = mutex_lock_interruptible(&dev->struct_mutex);
-       if (ret)
-               return ret;
-       return 0;
- }
- static u32 __i915_gem_park(struct drm_i915_private *i915)
+ static void __i915_gem_park(struct drm_i915_private *i915)
  {
        intel_wakeref_t wakeref;
  
        GEM_BUG_ON(!list_empty(&i915->gt.active_rings));
  
        if (!i915->gt.awake)
-               return I915_EPOCH_INVALID;
-       GEM_BUG_ON(i915->gt.epoch == I915_EPOCH_INVALID);
+               return;
  
        /*
         * Be paranoid and flush a concurrent interrupt to make sure
  
        intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);
  
-       return i915->gt.epoch;
+       i915_globals_park();
  }
  
  void i915_gem_park(struct drm_i915_private *i915)
@@@ -225,8 -184,7 +184,7 @@@ void i915_gem_unpark(struct drm_i915_pr
        i915->gt.awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
        GEM_BUG_ON(!i915->gt.awake);
  
-       if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */
-               i915->gt.epoch = 1;
+       i915_globals_unpark();
  
        intel_enable_gt_powersave(i915);
        i915_update_gfx_val(i915);
@@@ -351,7 -309,7 +309,7 @@@ static void __start_cpu_write(struct dr
                obj->cache_dirty = true;
  }
  
- static void
+ void
  __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
                                struct sg_table *pages,
                                bool needs_clflush)
@@@ -459,8 -417,7 +417,7 @@@ int i915_gem_object_unbind(struct drm_i
  static long
  i915_gem_object_wait_fence(struct dma_fence *fence,
                           unsigned int flags,
-                          long timeout,
-                          struct intel_rps_client *rps_client)
+                          long timeout)
  {
        struct i915_request *rq;
  
        if (i915_request_completed(rq))
                goto out;
  
-       /*
-        * This client is about to stall waiting for the GPU. In many cases
-        * this is undesirable and limits the throughput of the system, as
-        * many clients cannot continue processing user input/output whilst
-        * blocked. RPS autotuning may take tens of milliseconds to respond
-        * to the GPU load and thus incurs additional latency for the client.
-        * We can circumvent that by promoting the GPU frequency to maximum
-        * before we wait. This makes the GPU throttle up much more quickly
-        * (good for benchmarks and user experience, e.g. window animations),
-        * but at a cost of spending more power processing the workload
-        * (bad for battery). Not all clients even want their results
-        * immediately and for them we should just let the GPU select its own
-        * frequency to maximise efficiency. To prevent a single client from
-        * forcing the clocks too high for the whole system, we only allow
-        * each client to waitboost once in a busy period.
-        */
-       if (rps_client && !i915_request_started(rq)) {
-               if (INTEL_GEN(rq->i915) >= 6)
-                       gen6_rps_boost(rq, rps_client);
-       }
        timeout = i915_request_wait(rq, flags, timeout);
  
  out:
  static long
  i915_gem_object_wait_reservation(struct reservation_object *resv,
                                 unsigned int flags,
-                                long timeout,
-                                struct intel_rps_client *rps_client)
+                                long timeout)
  {
        unsigned int seq = __read_seqcount_begin(&resv->seq);
        struct dma_fence *excl;
  
                for (i = 0; i < count; i++) {
                        timeout = i915_gem_object_wait_fence(shared[i],
-                                                            flags, timeout,
-                                                            rps_client);
+                                                            flags, timeout);
                        if (timeout < 0)
                                break;
  
        }
  
        if (excl && timeout >= 0)
-               timeout = i915_gem_object_wait_fence(excl, flags, timeout,
-                                                    rps_client);
+               timeout = i915_gem_object_wait_fence(excl, flags, timeout);
  
        dma_fence_put(excl);
  
@@@ -652,30 -585,19 +585,19 @@@ i915_gem_object_wait_priority(struct dr
   * @obj: i915 gem object
   * @flags: how to wait (under a lock, for all rendering or just for writes etc)
   * @timeout: how long to wait
-  * @rps_client: client (user process) to charge for any waitboosting
   */
  int
  i915_gem_object_wait(struct drm_i915_gem_object *obj,
                     unsigned int flags,
-                    long timeout,
-                    struct intel_rps_client *rps_client)
+                    long timeout)
  {
        might_sleep();
        GEM_BUG_ON(timeout < 0);
  
-       timeout = i915_gem_object_wait_reservation(obj->resv,
-                                                  flags, timeout,
-                                                  rps_client);
+       timeout = i915_gem_object_wait_reservation(obj->resv, flags, timeout);
        return timeout < 0 ? timeout : 0;
  }
  
- static struct intel_rps_client *to_rps_client(struct drm_file *file)
- {
-       struct drm_i915_file_private *fpriv = file->driver_priv;
-
-       return &fpriv->rps_client;
- }
  static int
  i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
                     struct drm_i915_gem_pwrite *args,
        return 0;
  }
  
- void *i915_gem_object_alloc(struct drm_i915_private *dev_priv)
- {
-       return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
- }
-
- void i915_gem_object_free(struct drm_i915_gem_object *obj)
- {
-       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
-
-       kmem_cache_free(dev_priv->objects, obj);
- }
  static int
  i915_gem_create(struct drm_file *file,
                struct drm_i915_private *dev_priv,
-               u64 size,
+               u64 *size_p,
                u32 *handle_p)
  {
        struct drm_i915_gem_object *obj;
-       int ret;
        u32 handle;
+       u64 size;
+       int ret;
  
-       size = roundup(size, PAGE_SIZE);
+       size = round_up(*size_p, PAGE_SIZE);
        if (size == 0)
                return -EINVAL;
  
                return ret;
  
        *handle_p = handle;
+       *size_p = size;
        return 0;
  }
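
i915_gem_create() now takes the size by pointer, rounds it up to a page and
writes the rounded value back, so both GEM_CREATE and dumb-buffer callers
learn the size actually allocated. A userspace sketch of the round-trip
(standard uAPI; error handling elided):

    struct drm_i915_gem_create create = {
            .size = 4000,   /* rounded up to 4096 by the kernel */
    };

    if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create) == 0) {
            /* create.handle names the object; create.size now holds
             * the page-aligned size the object really occupies. */
    }
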
  
@@@ -747,7 -660,7 +660,7 @@@ i915_gem_dumb_create(struct drm_file *f
        args->pitch = ALIGN(args->width * DIV_ROUND_UP(args->bpp, 8), 64);
        args->size = args->pitch * args->height;
        return i915_gem_create(file, to_i915(dev),
-                              args->size, &args->handle);
+                              &args->size, &args->handle);
  }
  
  static bool gpu_write_needs_clflush(struct drm_i915_gem_object *obj)
@@@ -772,7 -685,7 +685,7 @@@ i915_gem_create_ioctl(struct drm_devic
        i915_gem_flush_free_objects(dev_priv);
  
        return i915_gem_create(file, dev_priv,
-                              args->size, &args->handle);
+                              &args->size, &args->handle);
  }
  
  static inline enum fb_op_origin
@@@ -881,8 -794,7 +794,7 @@@ int i915_gem_obj_prepare_shmem_read(str
        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE |
                                   I915_WAIT_LOCKED,
-                                  MAX_SCHEDULE_TIMEOUT,
-                                  NULL);
+                                  MAX_SCHEDULE_TIMEOUT);
        if (ret)
                return ret;
  
@@@ -934,8 -846,7 +846,7 @@@ int i915_gem_obj_prepare_shmem_write(st
                                   I915_WAIT_INTERRUPTIBLE |
                                   I915_WAIT_LOCKED |
                                   I915_WAIT_ALL,
-                                  MAX_SCHEDULE_TIMEOUT,
-                                  NULL);
+                                  MAX_SCHEDULE_TIMEOUT);
        if (ret)
                return ret;
  
@@@ -1197,8 -1108,7 +1108,7 @@@ i915_gem_pread_ioctl(struct drm_device 
  
        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE,
-                                  MAX_SCHEDULE_TIMEOUT,
-                                  to_rps_client(file));
+                                  MAX_SCHEDULE_TIMEOUT);
        if (ret)
                goto out;
  
@@@ -1497,8 -1407,7 +1407,7 @@@ i915_gem_pwrite_ioctl(struct drm_devic
        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE |
                                   I915_WAIT_ALL,
-                                  MAX_SCHEDULE_TIMEOUT,
-                                  to_rps_client(file));
+                                  MAX_SCHEDULE_TIMEOUT);
        if (ret)
                goto err;
  
@@@ -1578,17 -1487,37 +1487,37 @@@ i915_gem_set_domain_ioctl(struct drm_de
        if ((write_domain | read_domains) & I915_GEM_GPU_DOMAINS)
                return -EINVAL;
  
-       /* Having something in the write domain implies it's in the read
+       /*
+        * Having something in the write domain implies it's in the read
         * domain, and only that read domain.  Enforce that in the request.
         */
-       if (write_domain != 0 && read_domains != write_domain)
+       if (write_domain && read_domains != write_domain)
                return -EINVAL;
  
+       if (!read_domains)
+               return 0;
+
        obj = i915_gem_object_lookup(file, args->handle);
        if (!obj)
                return -ENOENT;
  
-       /* Try to flush the object off the GPU without holding the lock.
+       /*
+        * Already in the desired write domain? Nothing for us to do!
+        *
+        * We apply a little bit of cunning here to catch a broader set of
+        * no-ops. If obj->write_domain is set, we must be in the same
+        * obj->read_domains, and only that domain. Therefore, if that
+        * obj->write_domain matches the request read_domains, we are
+        * already in the same read/write domain and can skip the operation,
+        * without having to further check the requested write_domain.
+        */
+       if (READ_ONCE(obj->write_domain) == read_domains) {
+               err = 0;
+               goto out;
+       }
+
+       /*
+        * Try to flush the object off the GPU without holding the lock.
         * We will repeat the flush holding the lock in the normal manner
         * to catch cases where we are gazumped.
         */
                                   I915_WAIT_INTERRUPTIBLE |
                                   I915_WAIT_PRIORITY |
                                   (write_domain ? I915_WAIT_ALL : 0),
-                                  MAX_SCHEDULE_TIMEOUT,
-                                  to_rps_client(file));
+                                  MAX_SCHEDULE_TIMEOUT);
        if (err)
                goto out;
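
With the added early-outs, a set-domain request naming no read domains, or
one matching the object's current write domain, returns before touching
struct_mutex. For reference, a sketch of the standard uAPI call being
serviced here:

    struct drm_i915_gem_set_domain arg = {
            .handle = handle,
            .read_domains = I915_GEM_DOMAIN_GTT,
            .write_domain = I915_GEM_DOMAIN_GTT, /* implies the read domain */
    };

    ioctl(fd, DRM_IOCTL_I915_GEM_SET_DOMAIN, &arg);
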
  
@@@ -1808,6 -1736,9 +1736,9 @@@ static unsigned int tile_row_pages(cons
   * 2 - Recognise WC as a separate cache domain so that we can flush the
   *     delayed writes via GTT before performing direct access via WC.
   *
+  * 3 - Remove implicit set-domain(GTT) and synchronisation on initial
+  *     pagefault; swapin remains transparent.
+  *
   * Restrictions:
   *
   *  * snoopable objects cannot be accessed via the GTT. It can cause machine
   */
  int i915_gem_mmap_gtt_version(void)
  {
-       return 2;
+       return 3;
  }
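
Userspace can gate on the relaxed fault behaviour by reading the bumped
version through the existing getparam interface; a sketch:

    int value = 0;
    drm_i915_getparam_t gp = {
            .param = I915_PARAM_MMAP_GTT_VERSION,
            .value = &value,
    };

    if (ioctl(fd, DRM_IOCTL_I915_GETPARAM, &gp) == 0 && value >= 3)
            ;       /* no implicit set-domain(GTT) on the first GGTT fault */
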
  
  static inline struct i915_ggtt_view
@@@ -1891,6 -1822,7 +1822,7 @@@ vm_fault_t i915_gem_fault(struct vm_fau
        intel_wakeref_t wakeref;
        struct i915_vma *vma;
        pgoff_t page_offset;
+       int srcu;
        int ret;
  
        /* Sanity check that we allow writing into this object */
  
        trace_i915_gem_object_fault(obj, page_offset, true, write);
  
-       /* Try to flush the object off the GPU first without holding the lock.
-        * Upon acquiring the lock, we will perform our sanity checks and then
-        * repeat the flush holding the lock in the normal manner to catch cases
-        * where we are gazumped.
-        */
-       ret = i915_gem_object_wait(obj,
-                                  I915_WAIT_INTERRUPTIBLE,
-                                  MAX_SCHEDULE_TIMEOUT,
-                                  NULL);
-       if (ret)
-               goto err;
        ret = i915_gem_object_pin_pages(obj);
        if (ret)
                goto err;
  
        wakeref = intel_runtime_pm_get(dev_priv);
  
+       srcu = i915_reset_trylock(dev_priv);
+       if (srcu < 0) {
+               ret = srcu;
+               goto err_rpm;
+       }
        ret = i915_mutex_lock_interruptible(dev);
        if (ret)
-               goto err_rpm;
+               goto err_reset;
  
        /* Access to snoopable pages through the GTT is incoherent. */
        if (obj->cache_level != I915_CACHE_NONE && !HAS_LLC(dev_priv)) {
                goto err_unlock;
        }
  
        /* Now pin it into the GTT as needed */
        vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
                                       PIN_MAPPABLE |
                goto err_unlock;
        }
  
-       ret = i915_gem_object_set_to_gtt_domain(obj, write);
-       if (ret)
-               goto err_unpin;
        ret = i915_vma_pin_fence(vma);
        if (ret)
                goto err_unpin;
@@@ -1995,6 -1916,8 +1916,8 @@@ err_unpin
        __i915_vma_unpin(vma);
  err_unlock:
        mutex_unlock(&dev->struct_mutex);
+ err_reset:
+       i915_reset_unlock(dev_priv, srcu);
  err_rpm:
        intel_runtime_pm_put(dev_priv, wakeref);
        i915_gem_object_unpin_pages(obj);
@@@ -2007,7 -1930,7 +1930,7 @@@ err
                 * fail). But any other -EIO isn't ours (e.g. swap in failure)
                 * and so needs to be reported.
                 */
-               if (!i915_terminally_wedged(&dev_priv->gpu_error))
+               if (!i915_terminally_wedged(dev_priv))
                        return VM_FAULT_SIGBUS;
                /* else: fall through */
        case -EAGAIN:
@@@ -2280,7 -2203,6 +2203,6 @@@ i915_gem_object_put_pages_gtt(struct dr
        struct page *page;
  
        __i915_gem_object_release_shmem(obj, pages, true);
        i915_gem_gtt_finish_pages(obj, pages);
  
        if (i915_gem_object_needs_bit17_swizzle(obj))
@@@ -2488,7 -2410,7 +2410,7 @@@ rebuild_st
                do {
                        cond_resched();
                        page = shmem_read_mapping_page_gfp(mapping, i, gfp);
-                       if (likely(!IS_ERR(page)))
+                       if (!IS_ERR(page))
                                break;
  
                        if (!*s) {
@@@ -2622,6 -2544,14 +2544,14 @@@ void __i915_gem_object_set_pages(struc
  
        lockdep_assert_held(&obj->mm.lock);
  
+       /* Make the pages coherent with the GPU (flushing any swapin). */
+       if (obj->cache_dirty) {
+               obj->write_domain = 0;
+               if (i915_gem_object_has_struct_page(obj))
+                       drm_clflush_sg(pages);
+               obj->cache_dirty = false;
+       }
        obj->mm.get_page.sg_pos = pages->sgl;
        obj->mm.get_page.sg_idx = 0;
  
@@@ -2823,6 -2753,33 +2753,33 @@@ err_unlock
        goto out_unlock;
  }
  
+ void __i915_gem_object_flush_map(struct drm_i915_gem_object *obj,
+                                unsigned long offset,
+                                unsigned long size)
+ {
+       enum i915_map_type has_type;
+       void *ptr;
+
+       GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+       GEM_BUG_ON(range_overflows_t(typeof(obj->base.size),
+                                    offset, size, obj->base.size));
+
+       obj->mm.dirty = true;
+
+       if (obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE)
+               return;
+
+       ptr = page_unpack_bits(obj->mm.mapping, &has_type);
+       if (has_type == I915_MAP_WC)
+               return;
+
+       drm_clflush_virt_range(ptr + offset, size);
+       if (size == obj->base.size) {
+               obj->write_domain &= ~I915_GEM_DOMAIN_CPU;
+               obj->cache_dirty = false;
+       }
+ }
+
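
A hedged usage sketch for the new helper, mirroring how the relocation
writer below flushes a CPU-written pinned map before handing it to the GPU
(data and len are placeholder names):

    void *vaddr;

    vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
    if (IS_ERR(vaddr))
            return PTR_ERR(vaddr);

    memcpy(vaddr, data, len);                 /* CPU writes through the map */
    __i915_gem_object_flush_map(obj, 0, len); /* make them GPU-coherent */
    i915_gem_object_unpin_map(obj);
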
  static int
  i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
                           const struct drm_i915_gem_pwrite *arg)
        u64 remain, offset;
        unsigned int pg;
  
-       /* Before we instantiate/pin the backing store for our use, we
+       /* Caller already validated user args */
+       GEM_BUG_ON(!access_ok(user_data, arg->size));
+       /*
+        * Before we instantiate/pin the backing store for our use, we
         * can prepopulate the shmemfs filp efficiently using a write into
         * the pagecache. We avoid the penalty of instantiating all the
         * pages, important if the user is just writing to a few and never
        if (obj->mm.madv != I915_MADV_WILLNEED)
                return -EFAULT;
  
-       /* Before the pages are instantiated the object is treated as being
+       /*
+        * Before the pages are instantiated the object is treated as being
         * in the CPU domain. The pages will be clflushed as required before
         * use, and we can freely write into the pages directly. If userspace
         * races pwrite with any other operation; corruption will ensue -
                struct page *page;
                void *data, *vaddr;
                int err;
+               char c;
  
                len = PAGE_SIZE - pg;
                if (len > remain)
                        len = remain;
  
+               /* Prefault the user page to reduce potential recursion */
+               err = __get_user(c, user_data);
+               if (err)
+                       return err;
+
+               err = __get_user(c, user_data + len - 1);
+               if (err)
+                       return err;
+
                err = pagecache_write_begin(obj->base.filp, mapping,
                                            offset, len, 0,
                                            &page, &data);
                if (err < 0)
                        return err;
  
-               vaddr = kmap(page);
-               unwritten = copy_from_user(vaddr + pg, user_data, len);
-               kunmap(page);
+               vaddr = kmap_atomic(page);
+               unwritten = __copy_from_user_inatomic(vaddr + pg,
+                                                     user_data,
+                                                     len);
+               kunmap_atomic(vaddr);
  
                err = pagecache_write_end(obj->base.filp, mapping,
                                          offset, len, len - unwritten,
                if (err < 0)
                        return err;
  
+               /* We don't handle -EFAULT; leave it to the caller to check */
                if (unwritten)
-                       return -EFAULT;
+                       return -ENODEV;
  
                remain -= len;
                user_data += len;
        return 0;
  }
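
The prefault-then-atomic-copy dance above is the usual idiom for copying
from userspace while a pagecache page is locked: fault the source in while
no locks are held, then copy with pagefaults disabled (kmap_atomic) so a
fault on user_data cannot recurse into the pagecache being written. In
outline (variables as in the hunk above):

    char c;

    if (__get_user(c, user_data))     /* fault in the user page up front */
            return -EFAULT;

    /* ... pagecache_write_begin() takes the page lock ... */

    vaddr = kmap_atomic(page);        /* pagefaults disabled from here */
    unwritten = __copy_from_user_inatomic(vaddr, user_data, len);
    kunmap_atomic(vaddr);
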
  
- static bool match_ring(struct i915_request *rq)
- {
-       struct drm_i915_private *dev_priv = rq->i915;
-       u32 ring = I915_READ(RING_START(rq->engine->mmio_base));
-       return ring == i915_ggtt_offset(rq->ring->vma);
- }
- struct i915_request *
- i915_gem_find_active_request(struct intel_engine_cs *engine)
- {
-       struct i915_request *request, *active = NULL;
-       unsigned long flags;
-       /*
-        * We are called by the error capture, reset and to dump engine
-        * state at random points in time. In particular, note that neither is
-        * crucially ordered with an interrupt. After a hang, the GPU is dead
-        * and we assume that no more writes can happen (we waited long enough
-        * for all writes that were in transaction to be flushed) - adding an
-        * extra delay for a recent interrupt is pointless. Hence, we do
-        * not need an engine->irq_seqno_barrier() before the seqno reads.
-        * At all other times, we must assume the GPU is still running, but
-        * we only care about the snapshot of this moment.
-        */
-       spin_lock_irqsave(&engine->timeline.lock, flags);
-       list_for_each_entry(request, &engine->timeline.requests, link) {
-               if (i915_request_completed(request))
-                       continue;
-               if (!i915_request_started(request))
-                       break;
-               /* More than one preemptible request may match! */
-               if (!match_ring(request))
-                       break;
-               active = request;
-               break;
-       }
-       spin_unlock_irqrestore(&engine->timeline.lock, flags);
-       return active;
- }
  static void
  i915_gem_retire_work_handler(struct work_struct *work)
  {
                                   round_jiffies_up_relative(HZ));
  }
  
- static void shrink_caches(struct drm_i915_private *i915)
+ static bool switch_to_kernel_context_sync(struct drm_i915_private *i915,
+                                         unsigned long mask)
  {
-       /*
-        * kmem_cache_shrink() discards empty slabs and reorders partially
-        * filled slabs to prioritise allocating from the mostly full slabs,
-        * with the aim of reducing fragmentation.
-        */
-       kmem_cache_shrink(i915->priorities);
-       kmem_cache_shrink(i915->dependencies);
-       kmem_cache_shrink(i915->requests);
-       kmem_cache_shrink(i915->luts);
-       kmem_cache_shrink(i915->vmas);
-       kmem_cache_shrink(i915->objects);
- }
- struct sleep_rcu_work {
-       union {
-               struct rcu_head rcu;
-               struct work_struct work;
-       };
-       struct drm_i915_private *i915;
-       unsigned int epoch;
- };
+       bool result = true;
  
- static inline bool
- same_epoch(struct drm_i915_private *i915, unsigned int epoch)
- {
        /*
-        * There is a small chance that the epoch wrapped since we started
-        * sleeping. If we assume that epoch is at least a u32, then it will
-        * take at least 2^32 * 100ms for it to wrap, or about 13.6 years.
+        * Even if we fail to switch, give whatever is running a small chance
+        * to save itself before we report the failure. Yes, this may be a
+        * false positive due to e.g. ENOMEM, caveat emptor!
         */
-       return epoch == READ_ONCE(i915->gt.epoch);
- }
- static void __sleep_work(struct work_struct *work)
- {
-       struct sleep_rcu_work *s = container_of(work, typeof(*s), work);
-       struct drm_i915_private *i915 = s->i915;
-       unsigned int epoch = s->epoch;
-       kfree(s);
-       if (same_epoch(i915, epoch))
-               shrink_caches(i915);
- }
- static void __sleep_rcu(struct rcu_head *rcu)
- {
-       struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu);
-       struct drm_i915_private *i915 = s->i915;
+       if (i915_gem_switch_to_kernel_context(i915, mask))
+               result = false;
  
-       destroy_rcu_head(&s->rcu);
+       if (i915_gem_wait_for_idle(i915,
+                                  I915_WAIT_LOCKED |
+                                  I915_WAIT_FOR_IDLE_BOOST,
+                                  I915_GEM_IDLE_TIMEOUT))
+               result = false;
+       if (!result) {
+               if (i915_modparams.reset) { /* XXX hide warning from gem_eio */
+                       dev_err(i915->drm.dev,
+                               "Failed to idle engines, declaring wedged!\n");
+                       GEM_TRACE_DUMP();
+               }
  
-       if (same_epoch(i915, s->epoch)) {
-               INIT_WORK(&s->work, __sleep_work);
-               queue_work(i915->wq, &s->work);
-       } else {
-               kfree(s);
+               /* Forcibly cancel outstanding work and leave the gpu quiet. */
+               i915_gem_set_wedged(i915);
        }
- }
  
- static inline bool
- new_requests_since_last_retire(const struct drm_i915_private *i915)
- {
-       return (READ_ONCE(i915->gt.active_requests) ||
-               work_pending(&i915->gt.idle_work.work));
+       i915_retire_requests(i915); /* ensure we flush after wedging */
+       return result;
  }
  
- static void assert_kernel_context_is_current(struct drm_i915_private *i915)
+ static bool load_power_context(struct drm_i915_private *i915)
  {
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
+       /* Force loading the kernel context on all engines */
+       if (!switch_to_kernel_context_sync(i915, ALL_ENGINES))
+               return false;
  
-       if (i915_terminally_wedged(&i915->gpu_error))
-               return;
+       /*
+        * Immediately park the GPU so that we enable powersaving and
+        * treat it as idle. The next time we issue a request, we will
+        * unpark and start using the engine->pinned_default_state, otherwise
+        * it is in limbo and an early reset may fail.
+        */
+       __i915_gem_park(i915);
  
-       GEM_BUG_ON(i915->gt.active_requests);
-       for_each_engine(engine, i915, id) {
-               GEM_BUG_ON(__i915_active_request_peek(&engine->timeline.last_request));
-               GEM_BUG_ON(engine->last_retired_context !=
-                          to_intel_context(i915->kernel_context, engine));
-       }
+       return true;
  }
  
  static void
  i915_gem_idle_work_handler(struct work_struct *work)
  {
-       struct drm_i915_private *dev_priv =
-               container_of(work, typeof(*dev_priv), gt.idle_work.work);
-       unsigned int epoch = I915_EPOCH_INVALID;
+       struct drm_i915_private *i915 =
+               container_of(work, typeof(*i915), gt.idle_work.work);
        bool rearm_hangcheck;
  
-       if (!READ_ONCE(dev_priv->gt.awake))
+       if (!READ_ONCE(i915->gt.awake))
                return;
  
-       if (READ_ONCE(dev_priv->gt.active_requests))
+       if (READ_ONCE(i915->gt.active_requests))
                return;
  
-       /*
-        * Flush out the last user context, leaving only the pinned
-        * kernel context resident. When we are idling on the kernel_context,
-        * no more new requests (with a context switch) are emitted and we
-        * can finally rest. A consequence is that the idle work handler is
-        * always called at least twice before idling (and if the system is
-        * idle that implies a round trip through the retire worker).
-        */
-       mutex_lock(&dev_priv->drm.struct_mutex);
-       i915_gem_switch_to_kernel_context(dev_priv);
-       mutex_unlock(&dev_priv->drm.struct_mutex);
-       GEM_TRACE("active_requests=%d (after switch-to-kernel-context)\n",
-                 READ_ONCE(dev_priv->gt.active_requests));
-       /*
-        * Wait for last execlists context complete, but bail out in case a
-        * new request is submitted. As we don't trust the hardware, we
-        * continue on if the wait times out. This is necessary to allow
-        * the machine to suspend even if the hardware dies, and we will
-        * try to recover in resume (after depriving the hardware of power,
-        * it may be in a better mood).
-        */
-       __wait_for(if (new_requests_since_last_retire(dev_priv)) return,
-                  intel_engines_are_idle(dev_priv),
-                  I915_IDLE_ENGINES_TIMEOUT * 1000,
-                  10, 500);
        rearm_hangcheck =
-               cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
+               cancel_delayed_work_sync(&i915->gpu_error.hangcheck_work);
  
-       if (!mutex_trylock(&dev_priv->drm.struct_mutex)) {
+       if (!mutex_trylock(&i915->drm.struct_mutex)) {
                /* Currently busy, come back later */
-               mod_delayed_work(dev_priv->wq,
-                                &dev_priv->gt.idle_work,
+               mod_delayed_work(i915->wq,
+                                &i915->gt.idle_work,
                                 msecs_to_jiffies(50));
                goto out_rearm;
        }
  
        /*
-        * New request retired after this work handler started, extend active
-        * period until next instance of the work.
+        * Flush out the last user context, leaving only the pinned
+        * kernel context resident. Should anything unfortunate happen
+        * while we are idle (such as the GPU being power cycled), no users
+        * will be harmed.
         */
-       if (new_requests_since_last_retire(dev_priv))
-               goto out_unlock;
+       if (!work_pending(&i915->gt.idle_work.work) &&
+           !i915->gt.active_requests) {
+               ++i915->gt.active_requests; /* don't requeue idle */
  
-       epoch = __i915_gem_park(dev_priv);
+               switch_to_kernel_context_sync(i915, i915->gt.active_engines);
  
-       assert_kernel_context_is_current(dev_priv);
+               if (!--i915->gt.active_requests) {
+                       __i915_gem_park(i915);
+                       rearm_hangcheck = false;
+               }
+       }
  
-       rearm_hangcheck = false;
- out_unlock:
-       mutex_unlock(&dev_priv->drm.struct_mutex);
+       mutex_unlock(&i915->drm.struct_mutex);
  
  out_rearm:
        if (rearm_hangcheck) {
-               GEM_BUG_ON(!dev_priv->gt.awake);
-               i915_queue_hangcheck(dev_priv);
-       }
-       /*
-        * When we are idle, it is an opportune time to reap our caches.
-        * However, we have many objects that utilise RCU and the ordered
-        * i915->wq that this work is executing on. To try and flush any
-        * pending frees now we are idle, we first wait for an RCU grace
-        * period, and then queue a task (that will run last on the wq) to
-        * shrink and re-optimize the caches.
-        */
-       if (same_epoch(dev_priv, epoch)) {
-               struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
-               if (s) {
-                       init_rcu_head(&s->rcu);
-                       s->i915 = dev_priv;
-                       s->epoch = epoch;
-                       call_rcu(&s->rcu, __sleep_rcu);
-               }
+               GEM_BUG_ON(!i915->gt.awake);
+               i915_queue_hangcheck(i915);
        }
  }
  
@@@ -3171,7 -3026,7 +3026,7 @@@ void i915_gem_close_object(struct drm_g
                list_del(&lut->obj_link);
                list_del(&lut->ctx_link);
  
-               kmem_cache_free(i915->luts, lut);
+               i915_lut_handle_free(lut);
                __i915_gem_object_release_unless_active(obj);
        }
  
@@@ -3234,8 -3089,7 +3089,7 @@@ i915_gem_wait_ioctl(struct drm_device *
                                   I915_WAIT_INTERRUPTIBLE |
                                   I915_WAIT_PRIORITY |
                                   I915_WAIT_ALL,
-                                  to_wait_timeout(args->timeout_ns),
-                                  to_rps_client(file));
+                                  to_wait_timeout(args->timeout_ns));
  
        if (args->timeout_ns > 0) {
                args->timeout_ns -= ktime_to_ns(ktime_sub(ktime_get(), start));
@@@ -3304,7 -3158,7 +3158,7 @@@ wait_for_timelines(struct drm_i915_priv
                 * stalls, so allow the gpu to boost to maximum clocks.
                 */
                if (flags & I915_WAIT_FOR_IDLE_BOOST)
-                       gen6_rps_boost(rq, NULL);
+                       gen6_rps_boost(rq);
  
                timeout = i915_request_wait(rq, flags, timeout);
                i915_request_put(rq);
@@@ -3340,19 -3194,11 +3194,11 @@@ int i915_gem_wait_for_idle(struct drm_i
  
                lockdep_assert_held(&i915->drm.struct_mutex);
  
-               if (GEM_SHOW_DEBUG() && !timeout) {
-                       /* Presume that timeout was non-zero to begin with! */
-                       dev_warn(&i915->drm.pdev->dev,
-                                "Missed idle-completion interrupt!\n");
-                       GEM_TRACE_DUMP();
-               }
                err = wait_for_engines(i915);
                if (err)
                        return err;
  
                i915_retire_requests(i915);
-               GEM_BUG_ON(i915->gt.active_requests);
        }
  
        return 0;
@@@ -3399,8 -3245,7 +3245,7 @@@ i915_gem_object_set_to_wc_domain(struc
                                   I915_WAIT_INTERRUPTIBLE |
                                   I915_WAIT_LOCKED |
                                   (write ? I915_WAIT_ALL : 0),
-                                  MAX_SCHEDULE_TIMEOUT,
-                                  NULL);
+                                  MAX_SCHEDULE_TIMEOUT);
        if (ret)
                return ret;
  
@@@ -3462,8 -3307,7 +3307,7 @@@ i915_gem_object_set_to_gtt_domain(struc
                                   I915_WAIT_INTERRUPTIBLE |
                                   I915_WAIT_LOCKED |
                                   (write ? I915_WAIT_ALL : 0),
-                                  MAX_SCHEDULE_TIMEOUT,
-                                  NULL);
+                                  MAX_SCHEDULE_TIMEOUT);
        if (ret)
                return ret;
  
@@@ -3578,8 -3422,7 +3422,7 @@@ restart
                                           I915_WAIT_INTERRUPTIBLE |
                                           I915_WAIT_LOCKED |
                                           I915_WAIT_ALL,
-                                          MAX_SCHEDULE_TIMEOUT,
-                                          NULL);
+                                          MAX_SCHEDULE_TIMEOUT);
                if (ret)
                        return ret;
  
@@@ -3717,8 -3560,7 +3560,7 @@@ int i915_gem_set_caching_ioctl(struct d
  
        ret = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE,
-                                  MAX_SCHEDULE_TIMEOUT,
-                                  to_rps_client(file));
+                                  MAX_SCHEDULE_TIMEOUT);
        if (ret)
                goto out;
  
@@@ -3844,8 -3686,7 +3686,7 @@@ i915_gem_object_set_to_cpu_domain(struc
                                   I915_WAIT_INTERRUPTIBLE |
                                   I915_WAIT_LOCKED |
                                   (write ? I915_WAIT_ALL : 0),
-                                  MAX_SCHEDULE_TIMEOUT,
-                                  NULL);
+                                  MAX_SCHEDULE_TIMEOUT);
        if (ret)
                return ret;
  
@@@ -3891,8 -3732,9 +3732,9 @@@ i915_gem_ring_throttle(struct drm_devic
        long ret;
  
        /* ABI: return -EIO if already wedged */
-       if (i915_terminally_wedged(&dev_priv->gpu_error))
-               return -EIO;
+       ret = i915_terminally_wedged(dev_priv);
+       if (ret)
+               return ret;
  
        spin_lock(&file_priv->mm.lock);
        list_for_each_entry(request, &file_priv->mm.request_list, client_link) {
@@@ -3968,7 -3810,7 +3810,7 @@@ i915_gem_object_ggtt_pin(struct drm_i91
        }
  
        vma = i915_vma_instance(obj, vm, view);
-       if (unlikely(IS_ERR(vma)))
+       if (IS_ERR(vma))
                return vma;
  
        if (i915_vma_misplaced(vma, size, alignment, flags)) {
        return vma;
  }
  
- static __always_inline unsigned int __busy_read_flag(unsigned int id)
+ static __always_inline u32 __busy_read_flag(u8 id)
  {
-       /* Note that we could alias engines in the execbuf API, but
-        * that would be very unwise as it prevents userspace from
-        * fine control over engine selection. Ahem.
-        *
-        * This should be something like EXEC_MAX_ENGINE instead of
-        * I915_NUM_ENGINES.
-        */
-       BUILD_BUG_ON(I915_NUM_ENGINES > 16);
-       return 0x10000 << id;
+       if (id == (u8)I915_ENGINE_CLASS_INVALID)
+               return 0xffff0000u;
+       GEM_BUG_ON(id >= 16);
+       return 0x10000u << id;
  }
  
- static __always_inline unsigned int __busy_write_id(unsigned int id)
+ static __always_inline u32 __busy_write_id(u8 id)
  {
-       /* The uABI guarantees an active writer is also amongst the read
+       /*
+        * The uABI guarantees an active writer is also amongst the read
         * engines. This would be true if we accessed the activity tracking
         * under the lock, but as we perform the lookup of the object and
         * its activity locklessly we can not guarantee that the last_write
         * last_read - hence we always set both read and write busy for
         * last_write.
         */
-       return id | __busy_read_flag(id);
+       if (id == (u8)I915_ENGINE_CLASS_INVALID)
+               return 0xffffffffu;
+       return (id + 1) | __busy_read_flag(id);
  }
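
With engine classes replacing ring ids in the uABI, a busy-ioctl result now
encodes the last writer's class biased by one in the low 16 bits (zero
meaning idle) and a per-class read bitmask in the high 16. A sketch of how
userspace might decode it:

    struct drm_i915_gem_busy busy = { .handle = handle };

    if (ioctl(fd, DRM_IOCTL_I915_GEM_BUSY, &busy) == 0) {
            uint16_t writer  = busy.busy & 0xffff; /* 0 = idle, else class + 1 */
            uint16_t readers = busy.busy >> 16;    /* one bit per engine class */

            if (readers & (1u << I915_ENGINE_CLASS_RENDER))
                    ;      /* still being read by the render class */
            (void)writer;
    }
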
  
  static __always_inline unsigned int
- __busy_set_if_active(const struct dma_fence *fence,
-                    unsigned int (*flag)(unsigned int id))
+ __busy_set_if_active(const struct dma_fence *fence, u32 (*flag)(u8 id))
  {
-       struct i915_request *rq;
+       const struct i915_request *rq;
  
-       /* We have to check the current hw status of the fence as the uABI
+       /*
+        * We have to check the current hw status of the fence as the uABI
         * guarantees forward progress. We could rely on the idle worker
         * to eventually flush us, but to minimise latency just ask the
         * hardware.
                return 0;
  
        /* opencode to_request() in order to avoid const warnings */
-       rq = container_of(fence, struct i915_request, fence);
+       rq = container_of(fence, const struct i915_request, fence);
        if (i915_request_completed(rq))
                return 0;
  
-       return flag(rq->engine->uabi_id);
+       /* Beware type-expansion follies! */
+       BUILD_BUG_ON(!typecheck(u8, rq->engine->uabi_class));
+       return flag(rq->engine->uabi_class);
  }
  
  static __always_inline unsigned int
@@@ -4081,7 -3925,8 +3925,8 @@@ i915_gem_busy_ioctl(struct drm_device *
        if (!obj)
                goto out;
  
-       /* A discrepancy here is that we do not report the status of
+       /*
+        * A discrepancy here is that we do not report the status of
         * non-i915 fences, i.e. even though we may report the object as idle,
         * a call to set-domain may still stall waiting for foreign rendering.
         * This also means that wait-ioctl may report an object as busy,
@@@ -4281,7 -4126,7 +4126,7 @@@ i915_gem_object_create(struct drm_i915_
        if (overflows_type(size, obj->base.size))
                return ERR_PTR(-E2BIG);
  
-       obj = i915_gem_object_alloc(dev_priv);
+       obj = i915_gem_object_alloc();
        if (obj == NULL)
                return ERR_PTR(-ENOMEM);
  
@@@ -4354,7 -4199,7 +4199,7 @@@ static bool discard_backing_storage(str
         * acquiring such a reference whilst we are in the middle of
         * freeing the object.
         */
 -      return atomic_long_read(&obj->base.filp->f_count) == 1;
 +      return file_count(obj->base.filp) == 1;
  }
  
  static void __i915_gem_free_objects(struct drm_i915_private *i915,
                drm_gem_object_release(&obj->base);
                i915_gem_info_remove_obj(i915, obj->base.size);
  
-               kfree(obj->bit_17);
+               bitmap_free(obj->bit_17);
                i915_gem_object_free(obj);
  
                GEM_BUG_ON(!atomic_read(&i915->mm.free_count));
@@@ -4537,7 -4382,7 +4382,7 @@@ void i915_gem_sanitize(struct drm_i915_
        GEM_TRACE("\n");
  
        wakeref = intel_runtime_pm_get(i915);
-       intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+       intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
  
        /*
         * As we have just resumed the machine and woken the device up from
         * back to defaults, recovering from whatever wedged state we left it
         * in and so worth trying to use the device once more.
         */
-       if (i915_terminally_wedged(&i915->gpu_error))
+       if (i915_terminally_wedged(i915))
                i915_gem_unset_wedged(i915);
  
        /*
         */
        intel_engines_sanitize(i915, false);
  
-       intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+       intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
        intel_runtime_pm_put(i915, wakeref);
  
        mutex_lock(&i915->drm.struct_mutex);
        mutex_unlock(&i915->drm.struct_mutex);
  }
  
- int i915_gem_suspend(struct drm_i915_private *i915)
+ void i915_gem_suspend(struct drm_i915_private *i915)
  {
        intel_wakeref_t wakeref;
-       int ret;
  
        GEM_TRACE("\n");
  
        wakeref = intel_runtime_pm_get(i915);
-       intel_suspend_gt_powersave(i915);
  
        flush_workqueue(i915->wq);
  
         * state. Fortunately, the kernel_context is disposable and we do
         * not rely on its state.
         */
-       if (!i915_terminally_wedged(&i915->gpu_error)) {
-               ret = i915_gem_switch_to_kernel_context(i915);
-               if (ret)
-                       goto err_unlock;
-               ret = i915_gem_wait_for_idle(i915,
-                                            I915_WAIT_INTERRUPTIBLE |
-                                            I915_WAIT_LOCKED |
-                                            I915_WAIT_FOR_IDLE_BOOST,
-                                            MAX_SCHEDULE_TIMEOUT);
-               if (ret && ret != -EIO)
-                       goto err_unlock;
-               assert_kernel_context_is_current(i915);
-       }
-       i915_retire_requests(i915); /* ensure we flush after wedging */
+       switch_to_kernel_context_sync(i915, i915->gt.active_engines);
  
        mutex_unlock(&i915->drm.struct_mutex);
        i915_reset_flush(i915);
         */
        drain_delayed_work(&i915->gt.idle_work);
  
-       intel_uc_suspend(i915);
        /*
         * Assert that we successfully flushed all the work and
         * reset the GPU back to its idle, low power state.
         */
-       WARN_ON(i915->gt.awake);
-       if (WARN_ON(!intel_engines_are_idle(i915)))
-               i915_gem_set_wedged(i915); /* no hope, discard everything */
+       GEM_BUG_ON(i915->gt.awake);
  
-       intel_runtime_pm_put(i915, wakeref);
-       return 0;
+       intel_uc_suspend(i915);
  
- err_unlock:
-       mutex_unlock(&i915->drm.struct_mutex);
        intel_runtime_pm_put(i915, wakeref);
-       return ret;
  }
  
  void i915_gem_suspend_late(struct drm_i915_private *i915)
@@@ -4683,7 -4503,7 +4503,7 @@@ void i915_gem_resume(struct drm_i915_pr
        WARN_ON(i915->gt.awake);
  
        mutex_lock(&i915->drm.struct_mutex);
-       intel_uncore_forcewake_get(i915, FORCEWAKE_ALL);
+       intel_uncore_forcewake_get(&i915->uncore, FORCEWAKE_ALL);
  
        i915_gem_restore_gtt_mappings(i915);
        i915_gem_restore_fences(i915);
         * guarantee that the context image is complete. So let's just reset
         * it and start again.
         */
-       i915->gt.resume(i915);
+       intel_gt_resume(i915);
  
        if (i915_gem_init_hw(i915))
                goto err_wedged;
        intel_uc_resume(i915);
  
        /* Always reload a context for powersaving. */
-       if (i915_gem_switch_to_kernel_context(i915))
+       if (!load_power_context(i915))
                goto err_wedged;
  
  out_unlock:
-       intel_uncore_forcewake_put(i915, FORCEWAKE_ALL);
+       intel_uncore_forcewake_put(&i915->uncore, FORCEWAKE_ALL);
        mutex_unlock(&i915->drm.struct_mutex);
        return;
  
  err_wedged:
-       if (!i915_terminally_wedged(&i915->gpu_error)) {
-               DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
+       if (!i915_reset_failed(i915)) {
+               dev_err(i915->drm.dev,
+                       "Failed to re-initialize GPU, declaring it wedged!\n");
                i915_gem_set_wedged(i915);
        }
        goto out_unlock;
@@@ -4781,6 -4602,8 +4602,8 @@@ static int __i915_gem_restart_engines(v
                }
        }
  
+       intel_engines_set_scheduler_caps(i915);
        return 0;
  }
  
@@@ -4791,7 -4614,7 +4614,7 @@@ int i915_gem_init_hw(struct drm_i915_pr
        dev_priv->gt.last_init_time = ktime_get();
  
        /* Double layer security blanket, see i915_gem_init() */
-       intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+       intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
  
        if (HAS_EDRAM(dev_priv) && INTEL_GEN(dev_priv) < 9)
                I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
        init_unused_rings(dev_priv);
  
        BUG_ON(!dev_priv->kernel_context);
-       if (i915_terminally_wedged(&dev_priv->gpu_error)) {
-               ret = -EIO;
+       ret = i915_terminally_wedged(dev_priv);
+       if (ret)
                goto out;
-       }
  
        ret = i915_ppgtt_init_hw(dev_priv);
        if (ret) {
        if (ret)
                goto cleanup_uc;
  
-       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+       intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
  
        return 0;
  
  cleanup_uc:
        intel_uc_fini_hw(dev_priv);
  out:
-       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+       intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
  
        return ret;
  }
@@@ -4864,7 -4686,7 +4686,7 @@@ static int __intel_engines_record_defau
        struct i915_gem_context *ctx;
        struct intel_engine_cs *engine;
        enum intel_engine_id id;
-       int err;
+       int err = 0;
  
        /*
         * As we reset the gpu during very early sanitisation, the current
                        goto err_active;
        }
  
-       err = i915_gem_switch_to_kernel_context(i915);
-       if (err)
-               goto err_active;
-       if (i915_gem_wait_for_idle(i915, I915_WAIT_LOCKED, HZ / 5)) {
-               i915_gem_set_wedged(i915);
-               err = -EIO; /* Caller will declare us wedged */
+       /* Flush the default context image to memory, and enable powersaving. */
+       if (!load_power_context(i915)) {
+               err = -EIO;
                goto err_active;
        }
  
-       assert_kernel_context_is_current(i915);
-       /*
-        * Immediately park the GPU so that we enable powersaving and
-        * treat it as idle. The next time we issue a request, we will
-        * unpark and start using the engine->pinned_default_state, otherwise
-        * it is in limbo and an early reset may fail.
-        */
-       __i915_gem_park(i915);
        for_each_engine(engine, i915, id) {
+               struct intel_context *ce;
                struct i915_vma *state;
                void *vaddr;
  
-               GEM_BUG_ON(to_intel_context(ctx, engine)->pin_count);
+               ce = intel_context_lookup(ctx, engine);
+               if (!ce)
+                       continue;
  
-               state = to_intel_context(ctx, engine)->state;
+               state = ce->state;
                if (!state)
                        continue;
  
+               GEM_BUG_ON(intel_context_is_pinned(ce));
                /*
                 * As we will hold a reference to the logical state, it will
                 * not be torn down with the context, and importantly the
                        goto err_active;
  
                engine->default_state = i915_gem_object_get(state->obj);
+               i915_gem_object_set_cache_coherency(engine->default_state,
+                                                   I915_CACHE_LLC);
  
                /* Check we can acquire the image of the context state */
                vaddr = i915_gem_object_pin_map(engine->default_state,
@@@ -4982,19 -4797,10 +4797,10 @@@ out_ctx
  err_active:
        /*
         * If we have to abandon now, we expect the engines to be idle
-        * and ready to be torn-down. First try to flush any remaining
-        * request, ensure we are pointing at the kernel context and
-        * then remove it.
+        * and ready to be torn-down. The quickest way we can accomplish
+        * this is by declaring ourselves wedged.
         */
-       if (WARN_ON(i915_gem_switch_to_kernel_context(i915)))
-               goto out_ctx;
-       if (WARN_ON(i915_gem_wait_for_idle(i915,
-                                          I915_WAIT_LOCKED,
-                                          MAX_SCHEDULE_TIMEOUT)))
-               goto out_ctx;
-       i915_gem_contexts_lost(i915);
+       i915_gem_set_wedged(i915);
        goto out_ctx;
  }
  
@@@ -5047,13 -4853,10 +4853,10 @@@ int i915_gem_init(struct drm_i915_priva
  
        dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
  
-       if (HAS_LOGICAL_RING_CONTEXTS(dev_priv)) {
-               dev_priv->gt.resume = intel_lr_context_resume;
+       if (HAS_LOGICAL_RING_CONTEXTS(dev_priv))
                dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
-       } else {
-               dev_priv->gt.resume = intel_legacy_submission_resume;
+       else
                dev_priv->gt.cleanup_engine = intel_engine_cleanup;
-       }
  
        i915_timelines_init(dev_priv);
  
         * just magically go away.
         */
        mutex_lock(&dev_priv->drm.struct_mutex);
-       intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
+       intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
  
        ret = i915_gem_init_ggtt(dev_priv);
        if (ret) {
                goto err_init_hw;
        }
  
-       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+       intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
        mutex_unlock(&dev_priv->drm.struct_mutex);
  
        return 0;
  err_init_hw:
        mutex_unlock(&dev_priv->drm.struct_mutex);
  
-       WARN_ON(i915_gem_suspend(dev_priv));
+       i915_gem_suspend(dev_priv);
        i915_gem_suspend_late(dev_priv);
  
        i915_gem_drain_workqueue(dev_priv);
@@@ -5173,7 -4976,7 +4976,7 @@@ err_scratch
        i915_gem_fini_scratch(dev_priv);
  err_ggtt:
  err_unlock:
-       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+       intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
        mutex_unlock(&dev_priv->drm.struct_mutex);
  
  err_uc_misc:
                 * wedged. But we only want to do this where the GPU is angry,
                 * for all other failure, such as an allocation failure, bail.
                 */
-               if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
+               if (!i915_reset_failed(dev_priv)) {
                        i915_load_error(dev_priv,
                                        "Failed to initialize GPU, declaring it wedged!\n");
                        i915_gem_set_wedged(dev_priv);
@@@ -5305,36 -5108,7 +5108,7 @@@ static void i915_gem_init__mm(struct dr
  
  int i915_gem_init_early(struct drm_i915_private *dev_priv)
  {
-       int err = -ENOMEM;
-       dev_priv->objects = KMEM_CACHE(drm_i915_gem_object, SLAB_HWCACHE_ALIGN);
-       if (!dev_priv->objects)
-               goto err_out;
-       dev_priv->vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
-       if (!dev_priv->vmas)
-               goto err_objects;
-       dev_priv->luts = KMEM_CACHE(i915_lut_handle, 0);
-       if (!dev_priv->luts)
-               goto err_vmas;
-       dev_priv->requests = KMEM_CACHE(i915_request,
-                                       SLAB_HWCACHE_ALIGN |
-                                       SLAB_RECLAIM_ACCOUNT |
-                                       SLAB_TYPESAFE_BY_RCU);
-       if (!dev_priv->requests)
-               goto err_luts;
-       dev_priv->dependencies = KMEM_CACHE(i915_dependency,
-                                           SLAB_HWCACHE_ALIGN |
-                                           SLAB_RECLAIM_ACCOUNT);
-       if (!dev_priv->dependencies)
-               goto err_requests;
-       dev_priv->priorities = KMEM_CACHE(i915_priolist, SLAB_HWCACHE_ALIGN);
-       if (!dev_priv->priorities)
-               goto err_dependencies;
+       int err;
  
        INIT_LIST_HEAD(&dev_priv->gt.active_rings);
        INIT_LIST_HEAD(&dev_priv->gt.closed_vma);
        init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
        init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
        mutex_init(&dev_priv->gpu_error.wedge_mutex);
+       init_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
  
        atomic_set(&dev_priv->mm.bsd_engine_dispatch_index, 0);
  
                DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled(%d).\n", err);
  
        return 0;
- err_dependencies:
-       kmem_cache_destroy(dev_priv->dependencies);
- err_requests:
-       kmem_cache_destroy(dev_priv->requests);
- err_luts:
-       kmem_cache_destroy(dev_priv->luts);
- err_vmas:
-       kmem_cache_destroy(dev_priv->vmas);
- err_objects:
-       kmem_cache_destroy(dev_priv->objects);
- err_out:
-       return err;
  }
  
  void i915_gem_cleanup_early(struct drm_i915_private *dev_priv)
        GEM_BUG_ON(atomic_read(&dev_priv->mm.free_count));
        WARN_ON(dev_priv->mm.object_count);
  
-       kmem_cache_destroy(dev_priv->priorities);
-       kmem_cache_destroy(dev_priv->dependencies);
-       kmem_cache_destroy(dev_priv->requests);
-       kmem_cache_destroy(dev_priv->luts);
-       kmem_cache_destroy(dev_priv->vmas);
-       kmem_cache_destroy(dev_priv->objects);
-       /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
-       rcu_barrier();
+       cleanup_srcu_struct(&dev_priv->gpu_error.reset_backoff_srcu);
  
        i915_gemfs_fini(dev_priv);
  }
index 16f80a4488206a30522bd77f1841a5f80f0d4da0,3d672c9edb9416dbbd0742bd0db31a5737899e2b..c83d2a195d150b68e27482fb821f2494033b65af
@@@ -794,8 -794,8 +794,8 @@@ static int eb_wait_for_ring(const struc
         * keeping all of their resources pinned.
         */
  
-       ce = to_intel_context(eb->ctx, eb->engine);
-       if (!ce->ring) /* first use, assume empty! */
+       ce = intel_context_lookup(eb->ctx, eb->engine);
+       if (!ce || !ce->ring) /* first use, assume empty! */
                return 0;
  
        rq = __eb_wait_for_ring(ce->ring);
@@@ -849,12 -849,12 +849,12 @@@ static int eb_lookup_vmas(struct i915_e
                }
  
                vma = i915_vma_instance(obj, eb->vm, NULL);
-               if (unlikely(IS_ERR(vma))) {
+               if (IS_ERR(vma)) {
                        err = PTR_ERR(vma);
                        goto err_obj;
                }
  
-               lut = kmem_cache_alloc(eb->i915->luts, GFP_KERNEL);
+               lut = i915_lut_handle_alloc();
                if (unlikely(!lut)) {
                        err = -ENOMEM;
                        goto err_obj;
  
                err = radix_tree_insert(handles_vma, handle, vma);
                if (unlikely(err)) {
-                       kmem_cache_free(eb->i915->luts, lut);
+                       i915_lut_handle_free(lut);
                        goto err_obj;
                }
  
@@@ -1001,7 -1001,10 +1001,10 @@@ static void reloc_gpu_flush(struct relo
  {
        GEM_BUG_ON(cache->rq_size >= cache->rq->batch->obj->base.size / sizeof(u32));
        cache->rq_cmd[cache->rq_size] = MI_BATCH_BUFFER_END;
+       __i915_gem_object_flush_map(cache->rq->batch->obj, 0, cache->rq_size);
        i915_gem_object_unpin_map(cache->rq->batch->obj);
        i915_gem_chipset_flush(cache->rq->i915);
  
        i915_request_add(cache->rq);
@@@ -1214,10 -1217,6 +1217,6 @@@ static int __reloc_gpu_alloc(struct i91
        if (IS_ERR(cmd))
                return PTR_ERR(cmd);
  
-       err = i915_gem_object_set_to_wc_domain(obj, false);
-       if (err)
-               goto err_unmap;
        batch = i915_vma_instance(obj, vma->vm, NULL);
        if (IS_ERR(batch)) {
                err = PTR_ERR(batch);
@@@ -1667,7 -1666,6 +1666,7 @@@ static int eb_copy_relocations(const st
                                             len)) {
  end_user:
                                user_access_end();
 +end:
                                kvfree(relocs);
                                err = -EFAULT;
                                goto err;
                 * relocations were valid.
                 */
                if (!user_access_begin(urelocs, size))
 -                      goto end_user;
 +                      goto end;
  
                for (copied = 0; copied < nreloc; copied++)
                        unsafe_put_user(-1,
@@@ -1958,7 -1956,7 +1957,7 @@@ static int i915_reset_gen7_sol_offsets(
        u32 *cs;
        int i;
  
-       if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS) {
+       if (!IS_GEN(rq->i915, 7) || rq->engine->id != RCS0) {
                DRM_DEBUG("sol reset is gen7/rcs only\n");
                return -EINVAL;
        }
@@@ -2083,11 -2081,11 +2082,11 @@@ gen8_dispatch_bsd_engine(struct drm_i91
  #define I915_USER_RINGS (4)
  
  static const enum intel_engine_id user_ring_map[I915_USER_RINGS + 1] = {
-       [I915_EXEC_DEFAULT]     = RCS,
-       [I915_EXEC_RENDER]      = RCS,
-       [I915_EXEC_BLT]         = BCS,
-       [I915_EXEC_BSD]         = VCS,
-       [I915_EXEC_VEBOX]       = VECS
+       [I915_EXEC_DEFAULT]     = RCS0,
+       [I915_EXEC_RENDER]      = RCS0,
+       [I915_EXEC_BLT]         = BCS0,
+       [I915_EXEC_BSD]         = VCS0,
+       [I915_EXEC_VEBOX]       = VECS0
  };
  
  static struct intel_engine_cs *
@@@ -2110,7 -2108,7 +2109,7 @@@ eb_select_engine(struct drm_i915_privat
                return NULL;
        }
  
-       if (user_ring_id == I915_EXEC_BSD && HAS_BSD2(dev_priv)) {
+       if (user_ring_id == I915_EXEC_BSD && HAS_ENGINE(dev_priv, VCS1)) {
                unsigned int bsd_idx = args->flags & I915_EXEC_BSD_MASK;
  
                if (bsd_idx == I915_EXEC_BSD_DEFAULT) {
@@@ -2313,10 -2311,6 +2312,6 @@@ i915_gem_do_execbuffer(struct drm_devic
        if (args->flags & I915_EXEC_IS_PINNED)
                eb.batch_flags |= I915_DISPATCH_PINNED;
  
-       eb.engine = eb_select_engine(eb.i915, file, args);
-       if (!eb.engine)
-               return -EINVAL;
        if (args->flags & I915_EXEC_FENCE_IN) {
                in_fence = sync_file_get_fence(lower_32_bits(args->rsvd2));
                if (!in_fence)
        if (unlikely(err))
                goto err_destroy;
  
+       eb.engine = eb_select_engine(eb.i915, file, args);
+       if (!eb.engine) {
+               err = -EINVAL;
+               goto err_engine;
+       }
        /*
         * Take a local wakeref for preparing to dispatch the execbuf as
         * we expect to access the hardware fairly frequently in the
@@@ -2506,6 -2506,7 +2507,7 @@@ err_unlock
        mutex_unlock(&dev->struct_mutex);
  err_rpm:
        intel_runtime_pm_put(eb.i915, wakeref);
+ err_engine:
        i915_gem_context_put(eb.ctx);
  err_destroy:
        eb_destroy(&eb);
@@@ -2696,7 -2697,7 +2698,7 @@@ i915_gem_execbuffer2_ioctl(struct drm_d
                 * when we did the "copy_from_user()" above.
                 */
                if (!user_access_begin(user_exec_list, count * sizeof(*user_exec_list)))
 -                      goto end_user;
 +                      goto end;
  
                for (i = 0; i < args->buffer_count; i++) {
                        if (!(exec2_list[i].offset & UPDATE))
                }
  end_user:
                user_access_end();
 +end:;
        }
  
        args->flags &= ~__I915_EXEC_UNKNOWN_FLAGS;
index 41b5bcb803cb511e77a01ca239654b5a2d828dfc,36726392e737759c4ffc52840de6e745f2454082..961268f66c63c0333ba2643aa12fed67894759a3
  #include "i915_vma.h"
  
  #include "i915_drv.h"
+ #include "i915_globals.h"
  #include "intel_ringbuffer.h"
  #include "intel_frontbuffer.h"
  
  #include <drm/drm_gem.h>
  
+ static struct i915_global_vma {
+       struct i915_global base;
+       struct kmem_cache *slab_vmas;
+ } global;
+ 
+ struct i915_vma *i915_vma_alloc(void)
+ {
+       return kmem_cache_zalloc(global.slab_vmas, GFP_KERNEL);
+ }
+ 
+ void i915_vma_free(struct i915_vma *vma)
+ {
+       return kmem_cache_free(global.slab_vmas, vma);
+ }
+ 
  #if IS_ENABLED(CONFIG_DRM_I915_ERRLOG_GEM) && IS_ENABLED(CONFIG_DRM_DEBUG_MM)
  
  #include <linux/stackdepot.h>
  
  static void vma_print_allocator(struct i915_vma *vma, const char *reason)
  {
 -      unsigned long entries[12];
 -      struct stack_trace trace = {
 -              .entries = entries,
 -              .max_entries = ARRAY_SIZE(entries),
 -      };
 +      unsigned long *entries;
 +      unsigned int nr_entries;
        char buf[512];
  
        if (!vma->node.stack) {
@@@ -46,8 -65,8 +62,8 @@@
                return;
        }
  
 -      depot_fetch_stack(vma->node.stack, &trace);
 -      snprint_stack_trace(buf, sizeof(buf), &trace, 0);
 +      nr_entries = stack_depot_fetch(vma->node.stack, &entries);
 +      stack_trace_snprint(buf, sizeof(buf), entries, nr_entries, 0);
        DRM_DEBUG_DRIVER("vma.node [%08llx + %08llx] %s: inserted at %s\n",
                         vma->node.start, vma->node.size, reason, buf);
  }
@@@ -112,7 -131,7 +128,7 @@@ vma_create(struct drm_i915_gem_object *
        /* The aliasing_ppgtt should never be used directly! */
        GEM_BUG_ON(vm == &vm->i915->mm.aliasing_ppgtt->vm);
  
-       vma = kmem_cache_zalloc(vm->i915->vmas, GFP_KERNEL);
+       vma = i915_vma_alloc();
        if (vma == NULL)
                return ERR_PTR(-ENOMEM);
  
                cmp = i915_vma_compare(pos, vm, view);
                if (cmp == 0) {
                        spin_unlock(&obj->vma.lock);
-                       kmem_cache_free(vm->i915->vmas, vma);
+                       i915_vma_free(vma);
                        return pos;
                }
  
        return vma;
  
  err_vma:
-       kmem_cache_free(vm->i915->vmas, vma);
+       i915_vma_free(vma);
        return ERR_PTR(-E2BIG);
  }
  
@@@ -800,8 -819,6 +816,6 @@@ void i915_vma_reopen(struct i915_vma *v
  
  static void __i915_vma_destroy(struct i915_vma *vma)
  {
-       struct drm_i915_private *i915 = vma->vm->i915;
        GEM_BUG_ON(vma->node.allocated);
        GEM_BUG_ON(vma->fence);
  
  
        i915_active_fini(&vma->active);
  
-       kmem_cache_free(i915->vmas, vma);
+       i915_vma_free(vma);
  }
  
  void i915_vma_destroy(struct i915_vma *vma)
@@@ -1038,3 -1055,28 +1052,28 @@@ unpin
  #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
  #include "selftests/i915_vma.c"
  #endif
+ 
+ static void i915_global_vma_shrink(void)
+ {
+       kmem_cache_shrink(global.slab_vmas);
+ }
+ 
+ static void i915_global_vma_exit(void)
+ {
+       kmem_cache_destroy(global.slab_vmas);
+ }
+ 
+ static struct i915_global_vma global = { {
+       .shrink = i915_global_vma_shrink,
+       .exit = i915_global_vma_exit,
+ } };
+ 
+ int __init i915_global_vma_init(void)
+ {
+       global.slab_vmas = KMEM_CACHE(i915_vma, SLAB_HWCACHE_ALIGN);
+       if (!global.slab_vmas)
+               return -ENOMEM;
+ 
+       i915_global_register(&global.base);
+ 
+       return 0;
+ }
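
The block above is the i915_globals pattern: each object type owns a dedicated kmem_cache, wrapped in tiny alloc/free helpers, with shrink/exit hooks registered centrally. A minimal sketch of the same shape for a hypothetical "foo" object, using only the standard slab API (i915_global_register() and the i915_global table are driver-internal plumbing and are omitted):

#include <linux/module.h>
#include <linux/slab.h>

struct foo {
        unsigned long payload;
};

static struct kmem_cache *slab_foos;

static struct foo *foo_alloc(void)
{
        return kmem_cache_zalloc(slab_foos, GFP_KERNEL); /* zeroed object */
}

static void foo_free(struct foo *f)
{
        kmem_cache_free(slab_foos, f);
}

static int __init foo_init(void)
{
        struct foo *f;

        slab_foos = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN);
        if (!slab_foos)
                return -ENOMEM;

        f = foo_alloc();                /* smoke test the cache */
        if (f)
                foo_free(f);
        return 0;
}

static void __exit foo_exit(void)
{
        kmem_cache_destroy(slab_foos);
}

module_init(foo_init);
module_exit(foo_exit);
MODULE_LICENSE("GPL");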
index a46bffe2b288fafad1db399b7828f4138cef610c,e1005d7b75fd2bbcddfedbb2bf4bf98f456dd31f..34be2cfd0ec8d14a44e60cfada3a3b891ced4b58
   *    Jesse Barnes <jesse.barnes@intel.com>
   */
  
- #include <linux/i2c.h>
- #include <linux/slab.h>
  #include <linux/delay.h>
  #include <linux/hdmi.h>
+ #include <linux/i2c.h>
+ #include <linux/slab.h>
  #include <drm/drm_atomic_helper.h>
  #include <drm/drm_crtc.h>
  #include <drm/drm_edid.h>
  #include <drm/drm_hdcp.h>
  #include <drm/drm_scdc_helper.h>
- #include "intel_drv.h"
  #include <drm/i915_drm.h>
  #include <drm/intel_lpe_audio.h>
  #include "i915_drv.h"
+ #include "intel_audio.h"
+ #include "intel_connector.h"
+ #include "intel_ddi.h"
+ #include "intel_dp.h"
+ #include "intel_drv.h"
+ #include "intel_hdcp.h"
+ #include "intel_hdmi.h"
+ #include "intel_lspcon.h"
+ #include "intel_sdvo.h"
+ #include "intel_panel.h"
  
  static struct drm_device *intel_hdmi_to_dev(struct intel_hdmi *intel_hdmi)
  {
@@@ -82,6 -93,8 +93,8 @@@ static struct intel_hdmi *intel_attache
  static u32 g4x_infoframe_index(unsigned int type)
  {
        switch (type) {
+       case HDMI_PACKET_TYPE_GAMUT_METADATA:
+               return VIDEO_DIP_SELECT_GAMUT;
        case HDMI_INFOFRAME_TYPE_AVI:
                return VIDEO_DIP_SELECT_AVI;
        case HDMI_INFOFRAME_TYPE_SPD:
  static u32 g4x_infoframe_enable(unsigned int type)
  {
        switch (type) {
+       case HDMI_PACKET_TYPE_GENERAL_CONTROL:
+               return VIDEO_DIP_ENABLE_GCP;
+       case HDMI_PACKET_TYPE_GAMUT_METADATA:
+               return VIDEO_DIP_ENABLE_GAMUT;
+       case DP_SDP_VSC:
+               return 0;
        case HDMI_INFOFRAME_TYPE_AVI:
                return VIDEO_DIP_ENABLE_AVI;
        case HDMI_INFOFRAME_TYPE_SPD:
  static u32 hsw_infoframe_enable(unsigned int type)
  {
        switch (type) {
+       case HDMI_PACKET_TYPE_GENERAL_CONTROL:
+               return VIDEO_DIP_ENABLE_GCP_HSW;
+       case HDMI_PACKET_TYPE_GAMUT_METADATA:
+               return VIDEO_DIP_ENABLE_GMP_HSW;
        case DP_SDP_VSC:
                return VIDEO_DIP_ENABLE_VSC_HSW;
        case DP_SDP_PPS:
@@@ -135,6 -158,8 +158,8 @@@ hsw_dip_data_reg(struct drm_i915_privat
                 int i)
  {
        switch (type) {
+       case HDMI_PACKET_TYPE_GAMUT_METADATA:
+               return HSW_TVIDEO_DIP_GMP_DATA(cpu_transcoder, i);
        case DP_SDP_VSC:
                return HSW_TVIDEO_DIP_VSC_DATA(cpu_transcoder, i);
        case DP_SDP_PPS:
@@@ -182,6 -207,7 +207,6 @@@ static void g4x_write_infoframe(struct 
  
        I915_WRITE(VIDEO_DIP_CTL, val);
  
 -      mmiowb();
        for (i = 0; i < len; i += 4) {
                I915_WRITE(VIDEO_DIP_DATA, *data);
                data++;
        /* Write every possible data byte to force correct ECC calculation. */
        for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
                I915_WRITE(VIDEO_DIP_DATA, 0);
 -      mmiowb();
  
        val |= g4x_infoframe_enable(type);
        val &= ~VIDEO_DIP_FREQ_MASK;
        POSTING_READ(VIDEO_DIP_CTL);
  }
  
- static bool g4x_infoframe_enabled(struct intel_encoder *encoder,
+ static void g4x_read_infoframe(struct intel_encoder *encoder,
+                              const struct intel_crtc_state *crtc_state,
+                              unsigned int type,
+                              void *frame, ssize_t len)
+ {
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       u32 val, *data = frame;
+       int i;
+ 
+       val = I915_READ(VIDEO_DIP_CTL);
+       val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+       val |= g4x_infoframe_index(type);
+ 
+       I915_WRITE(VIDEO_DIP_CTL, val);
+ 
+       for (i = 0; i < len; i += 4)
+               *data++ = I915_READ(VIDEO_DIP_DATA);
+ }
+ 
+ static u32 g4x_infoframes_enabled(struct intel_encoder *encoder,
                                  const struct intel_crtc_state *pipe_config)
  {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        u32 val = I915_READ(VIDEO_DIP_CTL);
  
        if ((val & VIDEO_DIP_ENABLE) == 0)
-               return false;
+               return 0;
  
        if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
-               return false;
+               return 0;
  
        return val & (VIDEO_DIP_ENABLE_AVI |
                      VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_SPD);
@@@ -235,6 -282,7 +280,6 @@@ static void ibx_write_infoframe(struct 
  
        I915_WRITE(reg, val);
  
 -      mmiowb();
        for (i = 0; i < len; i += 4) {
                I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
                data++;
        /* Write every possible data byte to force correct ECC calculation. */
        for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
                I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
 -      mmiowb();
  
        val |= g4x_infoframe_enable(type);
        val &= ~VIDEO_DIP_FREQ_MASK;
        POSTING_READ(reg);
  }
  
- static bool ibx_infoframe_enabled(struct intel_encoder *encoder,
+ static void ibx_read_infoframe(struct intel_encoder *encoder,
+                              const struct intel_crtc_state *crtc_state,
+                              unsigned int type,
+                              void *frame, ssize_t len)
+ {
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       u32 val, *data = frame;
+       int i;
+ 
+       val = I915_READ(TVIDEO_DIP_CTL(crtc->pipe));
+       val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+       val |= g4x_infoframe_index(type);
+ 
+       I915_WRITE(TVIDEO_DIP_CTL(crtc->pipe), val);
+ 
+       for (i = 0; i < len; i += 4)
+               *data++ = I915_READ(TVIDEO_DIP_DATA(crtc->pipe));
+ }
+ 
+ static u32 ibx_infoframes_enabled(struct intel_encoder *encoder,
                                  const struct intel_crtc_state *pipe_config)
  {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        u32 val = I915_READ(reg);
  
        if ((val & VIDEO_DIP_ENABLE) == 0)
-               return false;
+               return 0;
  
        if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
-               return false;
+               return 0;
  
        return val & (VIDEO_DIP_ENABLE_AVI |
                      VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
@@@ -294,6 -364,7 +360,6 @@@ static void cpt_write_infoframe(struct 
  
        I915_WRITE(reg, val);
  
 -      mmiowb();
        for (i = 0; i < len; i += 4) {
                I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
                data++;
        /* Write every possible data byte to force correct ECC calculation. */
        for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
                I915_WRITE(TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
 -      mmiowb();
  
        val |= g4x_infoframe_enable(type);
        val &= ~VIDEO_DIP_FREQ_MASK;
        POSTING_READ(reg);
  }
  
- static bool cpt_infoframe_enabled(struct intel_encoder *encoder,
+ static void cpt_read_infoframe(struct intel_encoder *encoder,
+                              const struct intel_crtc_state *crtc_state,
+                              unsigned int type,
+                              void *frame, ssize_t len)
+ {
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       u32 val, *data = frame;
+       int i;
+ 
+       val = I915_READ(TVIDEO_DIP_CTL(crtc->pipe));
+       val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+       val |= g4x_infoframe_index(type);
+ 
+       I915_WRITE(TVIDEO_DIP_CTL(crtc->pipe), val);
+ 
+       for (i = 0; i < len; i += 4)
+               *data++ = I915_READ(TVIDEO_DIP_DATA(crtc->pipe));
+ }
+ 
+ static u32 cpt_infoframes_enabled(struct intel_encoder *encoder,
                                  const struct intel_crtc_state *pipe_config)
  {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        u32 val = I915_READ(TVIDEO_DIP_CTL(pipe));
  
        if ((val & VIDEO_DIP_ENABLE) == 0)
-               return false;
+               return 0;
  
        return val & (VIDEO_DIP_ENABLE_AVI |
                      VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
@@@ -346,6 -439,7 +433,6 @@@ static void vlv_write_infoframe(struct 
  
        I915_WRITE(reg, val);
  
 -      mmiowb();
        for (i = 0; i < len; i += 4) {
                I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), *data);
                data++;
        /* Write every possible data byte to force correct ECC calculation. */
        for (; i < VIDEO_DIP_DATA_SIZE; i += 4)
                I915_WRITE(VLV_TVIDEO_DIP_DATA(intel_crtc->pipe), 0);
 -      mmiowb();
  
        val |= g4x_infoframe_enable(type);
        val &= ~VIDEO_DIP_FREQ_MASK;
        POSTING_READ(reg);
  }
  
- static bool vlv_infoframe_enabled(struct intel_encoder *encoder,
+ static void vlv_read_infoframe(struct intel_encoder *encoder,
+                              const struct intel_crtc_state *crtc_state,
+                              unsigned int type,
+                              void *frame, ssize_t len)
+ {
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       u32 val, *data = frame;
+       int i;
+ 
+       val = I915_READ(VLV_TVIDEO_DIP_CTL(crtc->pipe));
+       val &= ~(VIDEO_DIP_SELECT_MASK | 0xf); /* clear DIP data offset */
+       val |= g4x_infoframe_index(type);
+ 
+       I915_WRITE(VLV_TVIDEO_DIP_CTL(crtc->pipe), val);
+ 
+       for (i = 0; i < len; i += 4)
+               *data++ = I915_READ(VLV_TVIDEO_DIP_DATA(crtc->pipe));
+ }
+ 
+ static u32 vlv_infoframes_enabled(struct intel_encoder *encoder,
                                  const struct intel_crtc_state *pipe_config)
  {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        u32 val = I915_READ(VLV_TVIDEO_DIP_CTL(pipe));
  
        if ((val & VIDEO_DIP_ENABLE) == 0)
-               return false;
+               return 0;
  
        if ((val & VIDEO_DIP_PORT_MASK) != VIDEO_DIP_PORT(encoder->port))
-               return false;
+               return 0;
  
        return val & (VIDEO_DIP_ENABLE_AVI |
                      VIDEO_DIP_ENABLE_VENDOR | VIDEO_DIP_ENABLE_GAMUT |
@@@ -398,6 -514,7 +506,6 @@@ static void hsw_write_infoframe(struct 
        val &= ~hsw_infoframe_enable(type);
        I915_WRITE(ctl_reg, val);
  
 -      mmiowb();
        for (i = 0; i < len; i += 4) {
                I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
                                            type, i >> 2), *data);
        for (; i < data_size; i += 4)
                I915_WRITE(hsw_dip_data_reg(dev_priv, cpu_transcoder,
                                            type, i >> 2), 0);
 -      mmiowb();
  
        val |= hsw_infoframe_enable(type);
        I915_WRITE(ctl_reg, val);
        POSTING_READ(ctl_reg);
  }
  
- static bool hsw_infoframe_enabled(struct intel_encoder *encoder,
+ static void hsw_read_infoframe(struct intel_encoder *encoder,
+                              const struct intel_crtc_state *crtc_state,
+                              unsigned int type,
+                              void *frame, ssize_t len)
+ {
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       enum transcoder cpu_transcoder = crtc_state->cpu_transcoder;
+       u32 val, *data = frame;
+       int i;
+ 
+       val = I915_READ(HSW_TVIDEO_DIP_CTL(cpu_transcoder));
+ 
+       for (i = 0; i < len; i += 4)
+               *data++ = I915_READ(hsw_dip_data_reg(dev_priv, cpu_transcoder,
+                                                    type, i >> 2));
+ }
+ 
+ static u32 hsw_infoframes_enabled(struct intel_encoder *encoder,
                                  const struct intel_crtc_state *pipe_config)
  {
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
                      VIDEO_DIP_ENABLE_GMP_HSW | VIDEO_DIP_ENABLE_SPD_HSW);
  }
  
+ static const u8 infoframe_type_to_idx[] = {
+       HDMI_PACKET_TYPE_GENERAL_CONTROL,
+       HDMI_PACKET_TYPE_GAMUT_METADATA,
+       DP_SDP_VSC,
+       HDMI_INFOFRAME_TYPE_AVI,
+       HDMI_INFOFRAME_TYPE_SPD,
+       HDMI_INFOFRAME_TYPE_VENDOR,
+ };
+ 
+ u32 intel_hdmi_infoframe_enable(unsigned int type)
+ {
+       int i;
+ 
+       for (i = 0; i < ARRAY_SIZE(infoframe_type_to_idx); i++) {
+               if (infoframe_type_to_idx[i] == type)
+                       return BIT(i);
+       }
+ 
+       return 0;
+ }
+ 
+ u32 intel_hdmi_infoframes_enabled(struct intel_encoder *encoder,
+                                 const struct intel_crtc_state *crtc_state)
+ {
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_digital_port *dig_port = enc_to_dig_port(&encoder->base);
+       u32 val, ret = 0;
+       int i;
+ 
+       val = dig_port->infoframes_enabled(encoder, crtc_state);
+ 
+       /* map from hardware bits to dip idx */
+       for (i = 0; i < ARRAY_SIZE(infoframe_type_to_idx); i++) {
+               unsigned int type = infoframe_type_to_idx[i];
+ 
+               if (HAS_DDI(dev_priv)) {
+                       if (val & hsw_infoframe_enable(type))
+                               ret |= BIT(i);
+               } else {
+                       if (val & g4x_infoframe_enable(type))
+                               ret |= BIT(i);
+               }
+       }
+ 
+       return ret;
+ }
+ 
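
intel_hdmi_infoframe_enable() gives every supported packet type a stable bit position, so crtc_state->infoframes.enable can record the whole set in one u32 and the enabled-mask read back from hardware can be compared against it. A self-contained userspace sketch of that type-to-bit mapping; the packet-type codes here are placeholders, not the real HDMI/DP constants:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical packet-type codes standing in for the HDMI/DP constants. */
enum { PKT_GCP = 0x03, PKT_GAMUT = 0x0a, PKT_AVI = 0x82, PKT_SPD = 0x83 };

static const uint8_t type_to_idx[] = { PKT_GCP, PKT_GAMUT, PKT_AVI, PKT_SPD };

/* Return a single-bit mask identifying 'type', or 0 if unknown. */
static uint32_t infoframe_enable_bit(unsigned int type)
{
        for (unsigned int i = 0; i < sizeof(type_to_idx); i++)
                if (type_to_idx[i] == type)
                        return 1u << i;
        return 0;
}

int main(void)
{
        uint32_t enabled = 0;

        enabled |= infoframe_enable_bit(PKT_AVI);
        enabled |= infoframe_enable_bit(PKT_SPD);
        printf("enable mask: %#x\n", enabled);  /* bits 2 and 3 -> 0xc */
        return 0;
}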
  /*
   * The data we write to the DIP data buffer registers is 1 byte bigger than the
   * HDMI infoframe size because of an ECC/reserved byte at position 3 (starting
   */
  static void intel_write_infoframe(struct intel_encoder *encoder,
                                  const struct intel_crtc_state *crtc_state,
-                                 union hdmi_infoframe *frame)
+                                 enum hdmi_infoframe_type type,
+                                 const union hdmi_infoframe *frame)
  {
        struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
        u8 buffer[VIDEO_DIP_DATA_SIZE];
        ssize_t len;
  
+       if ((crtc_state->infoframes.enable &
+            intel_hdmi_infoframe_enable(type)) == 0)
+               return;
+ 
+       if (WARN_ON(frame->any.type != type))
+               return;
+ 
        /* see comment above for the reason for this offset */
-       len = hdmi_infoframe_pack(frame, buffer + 1, sizeof(buffer) - 1);
-       if (len < 0)
+       len = hdmi_infoframe_pack_only(frame, buffer + 1, sizeof(buffer) - 1);
+       if (WARN_ON(len < 0))
                return;
  
        /* Insert the 'hole' (see big comment above) at position 3 */
        buffer[3] = 0;
        len++;
  
-       intel_dig_port->write_infoframe(encoder,
-                                       crtc_state,
-                                       frame->any.type, buffer, len);
+       intel_dig_port->write_infoframe(encoder, crtc_state, type, buffer, len);
  }
  
- static void intel_hdmi_set_avi_infoframe(struct intel_encoder *encoder,
-                                        const struct intel_crtc_state *crtc_state,
-                                        const struct drm_connector_state *conn_state)
+ void intel_read_infoframe(struct intel_encoder *encoder,
+                         const struct intel_crtc_state *crtc_state,
+                         enum hdmi_infoframe_type type,
+                         union hdmi_infoframe *frame)
+ {
+       struct intel_digital_port *intel_dig_port = enc_to_dig_port(&encoder->base);
+       u8 buffer[VIDEO_DIP_DATA_SIZE];
+       int ret;
+ 
+       if ((crtc_state->infoframes.enable &
+            intel_hdmi_infoframe_enable(type)) == 0)
+               return;
+ 
+       intel_dig_port->read_infoframe(encoder, crtc_state,
+                                      type, buffer, sizeof(buffer));
+ 
+       /* Fill the 'hole' (see big comment above) at position 3 */
+       memmove(&buffer[1], &buffer[0], 3);
+ 
+       /* see comment above for the reason for this offset */
+       ret = hdmi_infoframe_unpack(frame, buffer + 1, sizeof(buffer) - 1);
+       if (ret) {
+               DRM_DEBUG_KMS("Failed to unpack infoframe type 0x%02x\n", type);
+               return;
+       }
+ 
+       if (frame->any.type != type)
+               DRM_DEBUG_KMS("Found the wrong infoframe type 0x%x (expected 0x%02x)\n",
+                             frame->any.type, type);
+ }
+ 
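
The write and read paths are mirror images around the ECC/reserved byte: packing places the frame at buffer+1, the three header bytes are shifted down so index 3 becomes the zeroed "hole", and reading shifts the header back up before unpacking at buffer+1. A small standalone sketch of just that byte shuffling, independent of the real hdmi_infoframe_pack_only()/hdmi_infoframe_unpack() helpers:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Write path: frame packed at buf+1; shift the 3 header bytes down to
 * buf[0..2], leaving buf[3] as the zeroed ECC/reserved hole. */
static size_t insert_hole(uint8_t *buf, size_t packed_len)
{
        memmove(&buf[0], &buf[1], 3);
        buf[3] = 0;
        return packed_len + 1;          /* the hole adds one byte */
}

/* Read path: the raw buffer has the hole at index 3; shifting the header
 * up makes the packed frame contiguous again, starting at buf+1. */
static void remove_hole(uint8_t *buf)
{
        memmove(&buf[1], &buf[0], 3);
}

int main(void)
{
        /* pretend a 4-byte frame was packed at buf+1: header 82 02 0d + payload aa */
        uint8_t buf[8] = { 0x00, 0x82, 0x02, 0x0d, 0xaa };
        size_t len = insert_hole(buf, 4);

        printf("on the wire (%zu bytes): %02x %02x %02x %02x %02x\n",
               len, buf[0], buf[1], buf[2], buf[3], buf[4]);
        remove_hole(buf);
        printf("unpack from buf+1: %02x %02x %02x %02x\n",
               buf[1], buf[2], buf[3], buf[4]);
        return 0;
}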
+ static bool
+ intel_hdmi_compute_avi_infoframe(struct intel_encoder *encoder,
+                                struct intel_crtc_state *crtc_state,
+                                struct drm_connector_state *conn_state)
  {
+       struct hdmi_avi_infoframe *frame = &crtc_state->infoframes.avi.avi;
        const struct drm_display_mode *adjusted_mode =
                &crtc_state->base.adjusted_mode;
-       union hdmi_infoframe frame;
+       struct drm_connector *connector = conn_state->connector;
        int ret;
  
-       ret = drm_hdmi_avi_infoframe_from_display_mode(&frame.avi,
-                                                      conn_state->connector,
+       if (!crtc_state->has_infoframe)
+               return true;
+ 
+       crtc_state->infoframes.enable |=
+               intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_AVI);
+ 
+       ret = drm_hdmi_avi_infoframe_from_display_mode(frame, connector,
                                                       adjusted_mode);
-       if (ret < 0) {
-               DRM_ERROR("couldn't fill AVI infoframe\n");
-               return;
-       }
+       if (ret)
+               return false;
  
        if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420)
-               frame.avi.colorspace = HDMI_COLORSPACE_YUV420;
+               frame->colorspace = HDMI_COLORSPACE_YUV420;
        else if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR444)
-               frame.avi.colorspace = HDMI_COLORSPACE_YUV444;
+               frame->colorspace = HDMI_COLORSPACE_YUV444;
        else
-               frame.avi.colorspace = HDMI_COLORSPACE_RGB;
+               frame->colorspace = HDMI_COLORSPACE_RGB;
+ 
+       drm_hdmi_avi_infoframe_colorspace(frame, conn_state);
  
-       drm_hdmi_avi_infoframe_quant_range(&frame.avi,
-                                          conn_state->connector,
+       drm_hdmi_avi_infoframe_quant_range(frame, connector,
                                           adjusted_mode,
                                           crtc_state->limited_color_range ?
                                           HDMI_QUANTIZATION_RANGE_LIMITED :
                                           HDMI_QUANTIZATION_RANGE_FULL);
  
-       drm_hdmi_avi_infoframe_content_type(&frame.avi,
-                                           conn_state);
+       drm_hdmi_avi_infoframe_content_type(frame, conn_state);
  
        /* TODO: handle pixel repetition for YCBCR420 outputs */
-       intel_write_infoframe(encoder, crtc_state,
-                             &frame);
+       ret = hdmi_avi_infoframe_check(frame);
+       if (WARN_ON(ret))
+               return false;
+ 
+       return true;
  }
  
- static void intel_hdmi_set_spd_infoframe(struct intel_encoder *encoder,
-                                        const struct intel_crtc_state *crtc_state)
+ static bool
+ intel_hdmi_compute_spd_infoframe(struct intel_encoder *encoder,
+                                struct intel_crtc_state *crtc_state,
+                                struct drm_connector_state *conn_state)
  {
-       union hdmi_infoframe frame;
+       struct hdmi_spd_infoframe *frame = &crtc_state->infoframes.spd.spd;
        int ret;
  
-       ret = hdmi_spd_infoframe_init(&frame.spd, "Intel", "Integrated gfx");
-       if (ret < 0) {
-               DRM_ERROR("couldn't fill SPD infoframe\n");
-               return;
-       }
+       if (!crtc_state->has_infoframe)
+               return true;
  
-       frame.spd.sdi = HDMI_SPD_SDI_PC;
+       crtc_state->infoframes.enable |=
+               intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_SPD);
  
-       intel_write_infoframe(encoder, crtc_state,
-                             &frame);
+       ret = hdmi_spd_infoframe_init(frame, "Intel", "Integrated gfx");
+       if (WARN_ON(ret))
+               return false;
+ 
+       frame->sdi = HDMI_SPD_SDI_PC;
+ 
+       ret = hdmi_spd_infoframe_check(frame);
+       if (WARN_ON(ret))
+               return false;
+ 
+       return true;
  }
  
- static void
- intel_hdmi_set_hdmi_infoframe(struct intel_encoder *encoder,
-                             const struct intel_crtc_state *crtc_state,
-                             const struct drm_connector_state *conn_state)
- {
-       union hdmi_infoframe frame;
+ static bool
+ intel_hdmi_compute_hdmi_infoframe(struct intel_encoder *encoder,
+                                 struct intel_crtc_state *crtc_state,
+                                 struct drm_connector_state *conn_state)
+ {
+       struct hdmi_vendor_infoframe *frame =
+               &crtc_state->infoframes.hdmi.vendor.hdmi;
+       const struct drm_display_info *info =
+               &conn_state->connector->display_info;
        int ret;
  
-       ret = drm_hdmi_vendor_infoframe_from_display_mode(&frame.vendor.hdmi,
+       if (!crtc_state->has_infoframe || !info->has_hdmi_infoframe)
+               return true;
+ 
+       crtc_state->infoframes.enable |=
+               intel_hdmi_infoframe_enable(HDMI_INFOFRAME_TYPE_VENDOR);
+ 
+       ret = drm_hdmi_vendor_infoframe_from_display_mode(frame,
                                                          conn_state->connector,
                                                          &crtc_state->base.adjusted_mode);
-       if (ret < 0)
-               return;
+       if (WARN_ON(ret))
+               return false;
  
-       intel_write_infoframe(encoder, crtc_state,
-                             &frame);
+       ret = hdmi_vendor_infoframe_check(frame);
+       if (WARN_ON(ret))
+               return false;
+ 
+       return true;
  }
  
  static void g4x_set_infoframes(struct intel_encoder *encoder,
        I915_WRITE(reg, val);
        POSTING_READ(reg);
  
-       intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
-       intel_hdmi_set_spd_infoframe(encoder, crtc_state);
-       intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
+       intel_write_infoframe(encoder, crtc_state,
+                             HDMI_INFOFRAME_TYPE_AVI,
+                             &crtc_state->infoframes.avi);
+       intel_write_infoframe(encoder, crtc_state,
+                             HDMI_INFOFRAME_TYPE_SPD,
+                             &crtc_state->infoframes.spd);
+       intel_write_infoframe(encoder, crtc_state,
+                             HDMI_INFOFRAME_TYPE_VENDOR,
+                             &crtc_state->infoframes.hdmi);
  }
  
  static bool hdmi_sink_is_deep_color(const struct drm_connector_state *conn_state)
@@@ -664,7 -919,10 +909,10 @@@ static bool intel_hdmi_set_gcp_infofram
        struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
        struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
        i915_reg_t reg;
-       u32 val = 0;
+ 
+       if ((crtc_state->infoframes.enable &
+            intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) == 0)
+               return false;
  
        if (HAS_DDI(dev_priv))
                reg = HSW_TVIDEO_DIP_GCP(crtc_state->cpu_transcoder);
        else
                return false;
  
+       I915_WRITE(reg, crtc_state->infoframes.gcp);
+ 
+       return true;
+ }
+ 
+ void intel_hdmi_read_gcp_infoframe(struct intel_encoder *encoder,
+                                  struct intel_crtc_state *crtc_state)
+ {
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+       struct intel_crtc *crtc = to_intel_crtc(crtc_state->base.crtc);
+       i915_reg_t reg;
+ 
+       if ((crtc_state->infoframes.enable &
+            intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL)) == 0)
+               return;
+ 
+       if (HAS_DDI(dev_priv))
+               reg = HSW_TVIDEO_DIP_GCP(crtc_state->cpu_transcoder);
+       else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+               reg = VLV_TVIDEO_DIP_GCP(crtc->pipe);
+       else if (HAS_PCH_SPLIT(dev_priv))
+               reg = TVIDEO_DIP_GCP(crtc->pipe);
+       else
+               return;
+ 
+       crtc_state->infoframes.gcp = I915_READ(reg);
+ }
+ 
+ static void intel_hdmi_compute_gcp_infoframe(struct intel_encoder *encoder,
+                                            struct intel_crtc_state *crtc_state,
+                                            struct drm_connector_state *conn_state)
+ {
+       struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ 
+       if (IS_G4X(dev_priv) || !crtc_state->has_infoframe)
+               return;
+ 
+       crtc_state->infoframes.enable |=
+               intel_hdmi_infoframe_enable(HDMI_PACKET_TYPE_GENERAL_CONTROL);
+ 
        /* Indicate color depth whenever the sink supports deep color */
        if (hdmi_sink_is_deep_color(conn_state))
-               val |= GCP_COLOR_INDICATION;
+               crtc_state->infoframes.gcp |= GCP_COLOR_INDICATION;
  
        /* Enable default_phase whenever the display mode is suitably aligned */
        if (gcp_default_phase_possible(crtc_state->pipe_bpp,
                                       &crtc_state->base.adjusted_mode))
-               val |= GCP_DEFAULT_PHASE_ENABLE;
-       I915_WRITE(reg, val);
-       return val != 0;
+               crtc_state->infoframes.gcp |= GCP_DEFAULT_PHASE_ENABLE;
  }
  
  static void ibx_set_infoframes(struct intel_encoder *encoder,
        I915_WRITE(reg, val);
        POSTING_READ(reg);
  
-       intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
-       intel_hdmi_set_spd_infoframe(encoder, crtc_state);
-       intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
+       intel_write_infoframe(encoder, crtc_state,
+                             HDMI_INFOFRAME_TYPE_AVI,
+                             &crtc_state->infoframes.avi);
+       intel_write_infoframe(encoder, crtc_state,
+                             HDMI_INFOFRAME_TYPE_SPD,
+                             &crtc_state->infoframes.spd);
+       intel_write_infoframe(encoder, crtc_state,
+                             HDMI_INFOFRAME_TYPE_VENDOR,
+                             &crtc_state->infoframes.hdmi);
  }
  
  static void cpt_set_infoframes(struct intel_encoder *encoder,
        I915_WRITE(reg, val);
        POSTING_READ(reg);
  
-       intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
-       intel_hdmi_set_spd_infoframe(encoder, crtc_state);
-       intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
+       intel_write_infoframe(encoder, crtc_state,
+                             HDMI_INFOFRAME_TYPE_AVI,
+                             &crtc_state->infoframes.avi);
+       intel_write_infoframe(encoder, crtc_state,
+                             HDMI_INFOFRAME_TYPE_SPD,
+                             &crtc_state->infoframes.spd);
+       intel_write_infoframe(encoder, crtc_state,
+                             HDMI_INFOFRAME_TYPE_VENDOR,
+                             &crtc_state->infoframes.hdmi);
  }
  
  static void vlv_set_infoframes(struct intel_encoder *encoder,
        I915_WRITE(reg, val);
        POSTING_READ(reg);
  
-       intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
-       intel_hdmi_set_spd_infoframe(encoder, crtc_state);
-       intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
+       intel_write_infoframe(encoder, crtc_state,
+                             HDMI_INFOFRAME_TYPE_AVI,
+                             &crtc_state->infoframes.avi);
+       intel_write_infoframe(encoder, crtc_state,
+                             HDMI_INFOFRAME_TYPE_SPD,
+                             &crtc_state->infoframes.spd);
+       intel_write_infoframe(encoder, crtc_state,
+                             HDMI_INFOFRAME_TYPE_VENDOR,
+                             &crtc_state->infoframes.hdmi);
  }
  
  static void hsw_set_infoframes(struct intel_encoder *encoder,
        I915_WRITE(reg, val);
        POSTING_READ(reg);
  
-       intel_hdmi_set_avi_infoframe(encoder, crtc_state, conn_state);
-       intel_hdmi_set_spd_infoframe(encoder, crtc_state);
-       intel_hdmi_set_hdmi_infoframe(encoder, crtc_state, conn_state);
+       intel_write_infoframe(encoder, crtc_state,
+                             HDMI_INFOFRAME_TYPE_AVI,
+                             &crtc_state->infoframes.avi);
+       intel_write_infoframe(encoder, crtc_state,
+                             HDMI_INFOFRAME_TYPE_SPD,
+                             &crtc_state->infoframes.spd);
+       intel_write_infoframe(encoder, crtc_state,
+                             HDMI_INFOFRAME_TYPE_VENDOR,
+                             &crtc_state->infoframes.hdmi);
  }
  
  void intel_dp_dual_mode_set_tmds_output(struct intel_hdmi *hdmi, bool enable)
@@@ -1073,10 -1391,44 +1381,44 @@@ int intel_hdmi_hdcp_read_v_prime_part(s
        return ret;
  }
  
+ static int kbl_repositioning_enc_en_signal(struct intel_connector *connector)
+ {
+       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
+       struct intel_digital_port *intel_dig_port = conn_to_dig_port(connector);
+       struct drm_crtc *crtc = connector->base.state->crtc;
+       struct intel_crtc *intel_crtc = container_of(crtc,
+                                                    struct intel_crtc, base);
+       u32 scanline;
+       int ret;
+ 
+       for (;;) {
+               scanline = I915_READ(PIPEDSL(intel_crtc->pipe));
+               if (scanline > 100 && scanline < 200)
+                       break;
+               usleep_range(25, 50);
+       }
+ 
+       ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, false);
+       if (ret) {
+               DRM_ERROR("Disable HDCP signalling failed (%d)\n", ret);
+               return ret;
+       }
+ 
+       ret = intel_ddi_toggle_hdcp_signalling(&intel_dig_port->base, true);
+       if (ret) {
+               DRM_ERROR("Enable HDCP signalling failed (%d)\n", ret);
+               return ret;
+       }
+ 
+       return 0;
+ }
+ 
  static
  int intel_hdmi_hdcp_toggle_signalling(struct intel_digital_port *intel_dig_port,
                                      bool enable)
  {
+       struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+       struct intel_connector *connector = hdmi->attached_connector;
+       struct drm_i915_private *dev_priv = to_i915(connector->base.dev);
        int ret;
  
        if (!enable)
                          enable ? "Enable" : "Disable", ret);
                return ret;
        }
+ 
+       /*
+        * WA: fix the incorrect positioning of the window of opportunity
+        * and the enc_en signalling on KABYLAKE.
+        */
+       if (IS_KABYLAKE(dev_priv) && enable)
+               return kbl_repositioning_enc_en_signal(connector);
+ 
        return 0;
  }
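
kbl_repositioning_enc_en_signal() above simply busy-waits until scanout sits inside a safe scanline band before toggling HDCP signalling off and on again. A standalone sketch of that window-gated wait; the scanline source and the 100..200 band are stand-ins for PIPEDSL and the workaround's window:

#include <stdio.h>

/* Hypothetical scanline counter; stands in for reading PIPEDSL. */
static unsigned int read_scanline(void)
{
        static unsigned int line;

        line = (line + 37) % 1125;      /* pretend a display is scanning out */
        return line;
}

/* Poll until scanout is inside the (low, high) window of opportunity. */
static unsigned int wait_for_window(unsigned int low, unsigned int high)
{
        for (;;) {
                unsigned int scanline = read_scanline();

                if (scanline > low && scanline < high)
                        return scanline;
                /* the driver sleeps 25-50us between polls instead of spinning */
        }
}

int main(void)
{
        printf("hit window at scanline %u\n", wait_for_window(100, 200));
        return 0;
}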
  
@@@ -1119,6 -1479,190 +1469,190 @@@ bool intel_hdmi_hdcp_check_link(struct 
        return true;
  }
  
+ static struct hdcp2_hdmi_msg_data {
+       u8 msg_id;
+       u32 timeout;
+       u32 timeout2;
+ } hdcp2_msg_data[] = {
+       {HDCP_2_2_AKE_INIT, 0, 0},
+       {HDCP_2_2_AKE_SEND_CERT, HDCP_2_2_CERT_TIMEOUT_MS, 0},
+       {HDCP_2_2_AKE_NO_STORED_KM, 0, 0},
+       {HDCP_2_2_AKE_STORED_KM, 0, 0},
+       {HDCP_2_2_AKE_SEND_HPRIME, HDCP_2_2_HPRIME_PAIRED_TIMEOUT_MS,
+        HDCP_2_2_HPRIME_NO_PAIRED_TIMEOUT_MS},
+       {HDCP_2_2_AKE_SEND_PAIRING_INFO, HDCP_2_2_PAIRING_TIMEOUT_MS, 0},
+       {HDCP_2_2_LC_INIT, 0, 0},
+       {HDCP_2_2_LC_SEND_LPRIME, HDCP_2_2_HDMI_LPRIME_TIMEOUT_MS, 0},
+       {HDCP_2_2_SKE_SEND_EKS, 0, 0},
+       {HDCP_2_2_REP_SEND_RECVID_LIST, HDCP_2_2_RECVID_LIST_TIMEOUT_MS, 0},
+       {HDCP_2_2_REP_SEND_ACK, 0, 0},
+       {HDCP_2_2_REP_STREAM_MANAGE, 0, 0},
+       {HDCP_2_2_REP_STREAM_READY, HDCP_2_2_STREAM_READY_TIMEOUT_MS, 0},
+ };
+ 
+ static
+ int intel_hdmi_hdcp2_read_rx_status(struct intel_digital_port *intel_dig_port,
+                                   u8 *rx_status)
+ {
+       return intel_hdmi_hdcp_read(intel_dig_port,
+                                   HDCP_2_2_HDMI_REG_RXSTATUS_OFFSET,
+                                   rx_status,
+                                   HDCP_2_2_HDMI_RXSTATUS_LEN);
+ }
+ 
+ static int get_hdcp2_msg_timeout(u8 msg_id, bool is_paired)
+ {
+       int i;
+ 
+       for (i = 0; i < ARRAY_SIZE(hdcp2_msg_data); i++)
+               if (hdcp2_msg_data[i].msg_id == msg_id &&
+                   (msg_id != HDCP_2_2_AKE_SEND_HPRIME || is_paired))
+                       return hdcp2_msg_data[i].timeout;
+               else if (hdcp2_msg_data[i].msg_id == msg_id)
+                       return hdcp2_msg_data[i].timeout2;
+ 
+       return -EINVAL;
+ }
+ 
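
get_hdcp2_msg_timeout() encodes the one HDCP 2.2 special case directly in the table: AKE_SEND_HPRIME has a short timeout when a pairing already exists and a long one when it does not. A standalone sketch of that two-timeout lookup; the message ids and millisecond values here are illustrative, the real ones are the HDCP_2_2_* macros:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

enum { MSG_AKE_SEND_HPRIME = 4, MSG_LC_SEND_LPRIME = 10 };

struct msg_data {
        uint8_t msg_id;
        uint32_t timeout;       /* ms, normal case */
        uint32_t timeout2;      /* ms, alternate (not-yet-paired) case */
};

static const struct msg_data msg_data[] = {
        { MSG_AKE_SEND_HPRIME, 200, 1000 },
        { MSG_LC_SEND_LPRIME, 20, 0 },
};

/* Pick timeout2 only for H' when no pairing has happened yet. */
static int get_msg_timeout(uint8_t msg_id, bool is_paired)
{
        for (unsigned int i = 0; i < sizeof(msg_data) / sizeof(msg_data[0]); i++) {
                if (msg_data[i].msg_id != msg_id)
                        continue;
                if (msg_id == MSG_AKE_SEND_HPRIME && !is_paired)
                        return msg_data[i].timeout2;
                return msg_data[i].timeout;
        }
        return -1;      /* unknown message */
}

int main(void)
{
        printf("H' paired: %d ms, unpaired: %d ms\n",
               get_msg_timeout(MSG_AKE_SEND_HPRIME, true),
               get_msg_timeout(MSG_AKE_SEND_HPRIME, false));
        return 0;
}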
+ static inline
+ int hdcp2_detect_msg_availability(struct intel_digital_port *intel_digital_port,
+                                 u8 msg_id, bool *msg_ready,
+                                 ssize_t *msg_sz)
+ {
+       u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
+       int ret;
+ 
+       ret = intel_hdmi_hdcp2_read_rx_status(intel_digital_port, rx_status);
+       if (ret < 0) {
+               DRM_DEBUG_KMS("rx_status read failed. Err %d\n", ret);
+               return ret;
+       }
+ 
+       *msg_sz = ((HDCP_2_2_HDMI_RXSTATUS_MSG_SZ_HI(rx_status[1]) << 8) |
+                 rx_status[0]);
+ 
+       if (msg_id == HDCP_2_2_REP_SEND_RECVID_LIST)
+               *msg_ready = (HDCP_2_2_HDMI_RXSTATUS_READY(rx_status[1]) &&
+                            *msg_sz);
+       else
+               *msg_ready = *msg_sz;
+ 
+       return 0;
+ }
+ 
+ static ssize_t
+ intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *intel_dig_port,
+                             u8 msg_id, bool paired)
+ {
+       bool msg_ready = false;
+       int timeout, ret;
+       ssize_t msg_sz = 0;
+ 
+       timeout = get_hdcp2_msg_timeout(msg_id, paired);
+       if (timeout < 0)
+               return timeout;
+ 
+       ret = __wait_for(ret = hdcp2_detect_msg_availability(intel_dig_port,
+                                                            msg_id, &msg_ready,
+                                                            &msg_sz),
+                        !ret && msg_ready && msg_sz, timeout * 1000,
+                        1000, 5 * 1000);
+       if (ret)
+               DRM_DEBUG_KMS("msg_id: %d, ret: %d, timeout: %d\n",
+                             msg_id, ret, timeout);
+ 
+       return ret ? ret : msg_sz;
+ }
+ 
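
__wait_for() is an i915-internal polling macro: it re-evaluates an operation plus a condition, sleeping between attempts with an exponentially growing delay clamped to a ceiling, until the per-message timeout expires. A plain-C sketch of that shape; the condition and the microsecond bookkeeping here are stand-ins:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Hypothetical condition; stands in for "message ready in RxStatus". */
static bool condition_met(int tries)
{
        return tries >= 3;
}

/* Poll with exponential backoff between wait_us and wait_max_us, giving
 * up once roughly timeout_us of sleeping has accumulated. */
static int wait_for_condition(long timeout_us, long wait_us, long wait_max_us)
{
        struct timespec ts = { 0, 0 };
        long elapsed_us = 0;
        int tries = 0;

        for (;;) {
                if (condition_met(tries++))
                        return 0;
                if (elapsed_us >= timeout_us)
                        return -1;              /* -ETIMEDOUT analogue */

                ts.tv_nsec = wait_us * 1000;
                nanosleep(&ts, NULL);
                elapsed_us += wait_us;

                wait_us *= 2;                   /* back off... */
                if (wait_us > wait_max_us)
                        wait_us = wait_max_us;  /* ...up to the ceiling */
        }
}

int main(void)
{
        printf("result: %d\n", wait_for_condition(20000, 1000, 5000));
        return 0;
}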
+ static
+ int intel_hdmi_hdcp2_write_msg(struct intel_digital_port *intel_dig_port,
+                              void *buf, size_t size)
+ {
+       unsigned int offset;
+ 
+       offset = HDCP_2_2_HDMI_REG_WR_MSG_OFFSET;
+       return intel_hdmi_hdcp_write(intel_dig_port, offset, buf, size);
+ }
+ 
+ static
+ int intel_hdmi_hdcp2_read_msg(struct intel_digital_port *intel_dig_port,
+                             u8 msg_id, void *buf, size_t size)
+ {
+       struct intel_hdmi *hdmi = &intel_dig_port->hdmi;
+       struct intel_hdcp *hdcp = &hdmi->attached_connector->hdcp;
+       unsigned int offset;
+       ssize_t ret;
+ 
+       ret = intel_hdmi_hdcp2_wait_for_msg(intel_dig_port, msg_id,
+                                           hdcp->is_paired);
+       if (ret < 0)
+               return ret;
+ 
+       /*
+        * The available message size must be no larger than the caller's
+        * buffer.
+        */
+       if (ret > size) {
+               DRM_DEBUG_KMS("msg_sz(%zd) is more than exp size(%zu)\n",
+                             ret, size);
+               return -1;
+       }
+ 
+       offset = HDCP_2_2_HDMI_REG_RD_MSG_OFFSET;
+       ret = intel_hdmi_hdcp_read(intel_dig_port, offset, buf, ret);
+       if (ret)
+               DRM_DEBUG_KMS("Failed to read msg_id: %d(%zd)\n", msg_id, ret);
+ 
+       return ret;
+ }
+ 
+ static
+ int intel_hdmi_hdcp2_check_link(struct intel_digital_port *intel_dig_port)
+ {
+       u8 rx_status[HDCP_2_2_HDMI_RXSTATUS_LEN];
+       int ret;
+ 
+       ret = intel_hdmi_hdcp2_read_rx_status(intel_dig_port, rx_status);
+       if (ret)
+               return ret;
+ 
+       /*
+        * A re-auth request and a link integrity failure are reported via
+        * the same bit, i.e. reauth_req.
+        */
+       if (HDCP_2_2_HDMI_RXSTATUS_REAUTH_REQ(rx_status[1]))
+               ret = HDCP_REAUTH_REQUEST;
+       else if (HDCP_2_2_HDMI_RXSTATUS_READY(rx_status[1]))
+               ret = HDCP_TOPOLOGY_CHANGE;
+ 
+       return ret;
+ }
+ 
+ static
+ int intel_hdmi_hdcp2_capable(struct intel_digital_port *intel_dig_port,
+                            bool *capable)
+ {
+       u8 hdcp2_version;
+       int ret;
+ 
+       *capable = false;
+       ret = intel_hdmi_hdcp_read(intel_dig_port, HDCP_2_2_HDMI_REG_VER_OFFSET,
+                                  &hdcp2_version, sizeof(hdcp2_version));
+       if (!ret && hdcp2_version & HDCP_2_2_HDMI_SUPPORT_MASK)
+               *capable = true;
+ 
+       return ret;
+ }
+ 
+ static inline
+ enum hdcp_wired_protocol intel_hdmi_hdcp2_protocol(void)
+ {
+       return HDCP_PROTOCOL_HDMI;
+ }
+ 
  static const struct intel_hdcp_shim intel_hdmi_hdcp_shim = {
        .write_an_aksv = intel_hdmi_hdcp_write_an_aksv,
        .read_bksv = intel_hdmi_hdcp_read_bksv,
        .read_v_prime_part = intel_hdmi_hdcp_read_v_prime_part,
        .toggle_signalling = intel_hdmi_hdcp_toggle_signalling,
        .check_link = intel_hdmi_hdcp_check_link,
+       .write_2_2_msg = intel_hdmi_hdcp2_write_msg,
+       .read_2_2_msg = intel_hdmi_hdcp2_read_msg,
+       .check_2_2_link = intel_hdmi_hdcp2_check_link,
+       .hdcp_2_2_capable = intel_hdmi_hdcp2_capable,
+       .protocol = HDCP_PROTOCOL_HDMI,
  };
  
  static void intel_hdmi_prepare(struct intel_encoder *encoder,
@@@ -1195,7 -1744,6 +1734,6 @@@ static void intel_hdmi_get_config(struc
                                  struct intel_crtc_state *pipe_config)
  {
        struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
-       struct intel_digital_port *intel_dig_port = hdmi_to_dig_port(intel_hdmi);
        struct drm_device *dev = encoder->base.dev;
        struct drm_i915_private *dev_priv = to_i915(dev);
        u32 tmp, flags = 0;
        if (tmp & HDMI_MODE_SELECT_HDMI)
                pipe_config->has_hdmi_sink = true;
  
-       if (intel_dig_port->infoframe_enabled(encoder, pipe_config))
+       pipe_config->infoframes.enable |=
+               intel_hdmi_infoframes_enabled(encoder, pipe_config);
+ 
+       if (pipe_config->infoframes.enable)
                pipe_config->has_infoframe = true;
  
        if (tmp & SDVO_AUDIO_ENABLE)
        pipe_config->base.adjusted_mode.crtc_clock = dotclock;
  
        pipe_config->lane_count = 4;
+ 
+       intel_hdmi_read_gcp_infoframe(encoder, pipe_config);
+ 
+       intel_read_infoframe(encoder, pipe_config,
+                            HDMI_INFOFRAME_TYPE_AVI,
+                            &pipe_config->infoframes.avi);
+       intel_read_infoframe(encoder, pipe_config,
+                            HDMI_INFOFRAME_TYPE_SPD,
+                            &pipe_config->infoframes.spd);
+       intel_read_infoframe(encoder, pipe_config,
+                            HDMI_INFOFRAME_TYPE_VENDOR,
+                            &pipe_config->infoframes.hdmi);
  }
  
  static void intel_enable_hdmi_audio(struct intel_encoder *encoder,
@@@ -1654,7 -2217,7 +2207,7 @@@ static bool hdmi_deep_color_possible(co
  
        /* Display Wa_1405510057:icl */
        if (crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 &&
-           bpc == 10 && IS_ICELAKE(dev_priv) &&
+           bpc == 10 && INTEL_GEN(dev_priv) >= 11 &&
            (adjusted_mode->crtc_hblank_end -
             adjusted_mode->crtc_hblank_start) % 8 == 2)
                return false;
@@@ -1812,6 -2375,23 +2365,23 @@@ int intel_hdmi_compute_config(struct in
                }
        }
  
+       intel_hdmi_compute_gcp_infoframe(encoder, pipe_config, conn_state);
+ 
+       if (!intel_hdmi_compute_avi_infoframe(encoder, pipe_config, conn_state)) {
+               DRM_DEBUG_KMS("bad AVI infoframe\n");
+               return -EINVAL;
+       }
+ 
+       if (!intel_hdmi_compute_spd_infoframe(encoder, pipe_config, conn_state)) {
+               DRM_DEBUG_KMS("bad SPD infoframe\n");
+               return -EINVAL;
+       }
+ 
+       if (!intel_hdmi_compute_hdmi_infoframe(encoder, pipe_config, conn_state)) {
+               DRM_DEBUG_KMS("bad HDMI infoframe\n");
+               return -EINVAL;
+       }
+ 
        return 0;
  }
  
@@@ -1931,7 -2511,7 +2501,7 @@@ intel_hdmi_detect(struct drm_connector 
  
        wakeref = intel_display_power_get(dev_priv, POWER_DOMAIN_GMBUS);
  
-       if (IS_ICELAKE(dev_priv) &&
+       if (INTEL_GEN(dev_priv) >= 11 &&
            !intel_digital_port_connected(encoder))
                goto out;
  
@@@ -2133,10 -2713,21 +2703,21 @@@ static voi
  intel_hdmi_add_properties(struct intel_hdmi *intel_hdmi, struct drm_connector *connector)
  {
        struct drm_i915_private *dev_priv = to_i915(connector->dev);
+       struct intel_digital_port *intel_dig_port =
+                               hdmi_to_dig_port(intel_hdmi);
  
        intel_attach_force_audio_property(connector);
        intel_attach_broadcast_rgb_property(connector);
        intel_attach_aspect_ratio_property(connector);
+ 
+       /*
+        * Attach the colorspace property for non-LSPCON based devices.
+        * TODO: extend this to the LSPCON implementation as well; it will
+        * be handled separately.
+        */
+       if (!intel_dig_port->lspcon.active)
+               intel_attach_colorspace_property(connector);
+ 
        drm_connector_attach_content_type_property(connector);
        connector->state->picture_aspect_ratio = HDMI_PICTURE_ASPECT_NONE;
  
@@@ -2321,14 -2912,14 +2902,14 @@@ static u8 intel_hdmi_ddc_pin(struct drm
                return info->alternate_ddc_pin;
        }
  
-       if (IS_CHERRYVIEW(dev_priv))
-               ddc_pin = chv_port_to_ddc_pin(dev_priv, port);
-       else if (IS_GEN9_LP(dev_priv))
-               ddc_pin = bxt_port_to_ddc_pin(dev_priv, port);
+       if (HAS_PCH_ICP(dev_priv))
+               ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
        else if (HAS_PCH_CNP(dev_priv))
                ddc_pin = cnp_port_to_ddc_pin(dev_priv, port);
-       else if (HAS_PCH_ICP(dev_priv))
-               ddc_pin = icl_port_to_ddc_pin(dev_priv, port);
+       else if (IS_GEN9_LP(dev_priv))
+               ddc_pin = bxt_port_to_ddc_pin(dev_priv, port);
+       else if (IS_CHERRYVIEW(dev_priv))
+               ddc_pin = chv_port_to_ddc_pin(dev_priv, port);
        else
                ddc_pin = g4x_port_to_ddc_pin(dev_priv, port);
  
@@@ -2345,33 -2936,36 +2926,36 @@@ void intel_infoframe_init(struct intel_
  
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                intel_dig_port->write_infoframe = vlv_write_infoframe;
+               intel_dig_port->read_infoframe = vlv_read_infoframe;
                intel_dig_port->set_infoframes = vlv_set_infoframes;
-               intel_dig_port->infoframe_enabled = vlv_infoframe_enabled;
+               intel_dig_port->infoframes_enabled = vlv_infoframes_enabled;
        } else if (IS_G4X(dev_priv)) {
                intel_dig_port->write_infoframe = g4x_write_infoframe;
+               intel_dig_port->read_infoframe = g4x_read_infoframe;
                intel_dig_port->set_infoframes = g4x_set_infoframes;
-               intel_dig_port->infoframe_enabled = g4x_infoframe_enabled;
+               intel_dig_port->infoframes_enabled = g4x_infoframes_enabled;
        } else if (HAS_DDI(dev_priv)) {
                if (intel_dig_port->lspcon.active) {
-                       intel_dig_port->write_infoframe =
-                                       lspcon_write_infoframe;
+                       intel_dig_port->write_infoframe = lspcon_write_infoframe;
+                       intel_dig_port->read_infoframe = lspcon_read_infoframe;
                        intel_dig_port->set_infoframes = lspcon_set_infoframes;
-                       intel_dig_port->infoframe_enabled =
-                                               lspcon_infoframe_enabled;
+                       intel_dig_port->infoframes_enabled = lspcon_infoframes_enabled;
                } else {
-                       intel_dig_port->set_infoframes = hsw_set_infoframes;
-                       intel_dig_port->infoframe_enabled =
-                                               hsw_infoframe_enabled;
                        intel_dig_port->write_infoframe = hsw_write_infoframe;
+                       intel_dig_port->read_infoframe = hsw_read_infoframe;
+                       intel_dig_port->set_infoframes = hsw_set_infoframes;
+                       intel_dig_port->infoframes_enabled = hsw_infoframes_enabled;
                }
        } else if (HAS_PCH_IBX(dev_priv)) {
                intel_dig_port->write_infoframe = ibx_write_infoframe;
+               intel_dig_port->read_infoframe = ibx_read_infoframe;
                intel_dig_port->set_infoframes = ibx_set_infoframes;
-               intel_dig_port->infoframe_enabled = ibx_infoframe_enabled;
+               intel_dig_port->infoframes_enabled = ibx_infoframes_enabled;
        } else {
                intel_dig_port->write_infoframe = cpt_write_infoframe;
+               intel_dig_port->read_infoframe = cpt_read_infoframe;
                intel_dig_port->set_infoframes = cpt_set_infoframes;
-               intel_dig_port->infoframe_enabled = cpt_infoframe_enabled;
+               intel_dig_port->infoframes_enabled = cpt_infoframes_enabled;
        }
  }
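
intel_infoframe_init() picks one vtable of infoframe hooks per platform generation at init time, so the hot paths dispatch through function pointers instead of re-testing platform checks on every call. A self-contained sketch of that selection pattern, with hypothetical generations A and B:

#include <stdio.h>

/* Per-port ops chosen once at init, mirroring write/read/set/enabled above. */
struct port_ops {
        void (*write_infoframe)(const char *what);
        void (*read_infoframe)(const char *what);
};

static void gen_a_write(const char *what) { printf("A: write %s\n", what); }
static void gen_a_read(const char *what)  { printf("A: read %s\n", what); }
static void gen_b_write(const char *what) { printf("B: write %s\n", what); }
static void gen_b_read(const char *what)  { printf("B: read %s\n", what); }

static void port_ops_init(struct port_ops *ops, int is_gen_b)
{
        if (is_gen_b) {
                ops->write_infoframe = gen_b_write;
                ops->read_infoframe = gen_b_read;
        } else {
                ops->write_infoframe = gen_a_write;
                ops->read_infoframe = gen_a_read;
        }
}

int main(void)
{
        struct port_ops ops;

        port_ops_init(&ops, 1);
        ops.write_infoframe("AVI");     /* dispatches to the B variant */
        return 0;
}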
  
@@@ -2417,6 -3011,9 +3001,9 @@@ void intel_hdmi_init_connector(struct i
  
        intel_hdmi_add_properties(intel_hdmi, connector);
  
+       intel_connector_attach_encoder(intel_connector, intel_encoder);
+       intel_hdmi->attached_connector = intel_connector;
        if (is_hdcp_supported(dev_priv, port)) {
                int ret = intel_hdcp_init(intel_connector,
                                          &intel_hdmi_hdcp_shim);
                        DRM_DEBUG_KMS("HDCP init failed, skipping.\n");
        }
  
-       intel_connector_attach_encoder(intel_connector, intel_encoder);
-       intel_hdmi->attached_connector = intel_connector;
        /* For G4X desktop chip, PEG_BAND_GAP_DATA 3:0 must first be written
         * 0xd.  Failure to do so will result in spurious interrupts being
         * generated on the port when a cable is not attached.
index 20c4434474e3a504060370c3f7d0d701969fff7f,d4f4262d0fee15713209fbe2a2469ba5dd607d90..6150e35bf7b5dabff659ce33b4da522bf171b71b
  #include <drm/drm_print.h>
  
  #include "i915_drv.h"
+ #include "intel_cdclk.h"
+ #include "intel_crt.h"
+ #include "intel_csr.h"
+ #include "intel_dp.h"
  #include "intel_drv.h"
  
  /**
  static noinline depot_stack_handle_t __save_depot_stack(void)
  {
        unsigned long entries[STACKDEPTH];
 -      struct stack_trace trace = {
 -              .entries = entries,
 -              .max_entries = ARRAY_SIZE(entries),
 -              .skip = 1,
 -      };
 +      unsigned int n;
  
 -      save_stack_trace(&trace);
 -      if (trace.nr_entries &&
 -          trace.entries[trace.nr_entries - 1] == ULONG_MAX)
 -              trace.nr_entries--;
 -
 -      return depot_save_stack(&trace, GFP_NOWAIT | __GFP_NOWARN);
 +      n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
 +      return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
  }
  
  static void __print_depot_stack(depot_stack_handle_t stack,
                                char *buf, int sz, int indent)
  {
 -      unsigned long entries[STACKDEPTH];
 -      struct stack_trace trace = {
 -              .entries = entries,
 -              .max_entries = ARRAY_SIZE(entries),
 -      };
 +      unsigned long *entries;
 +      unsigned int nr_entries;
  
 -      depot_fetch_stack(stack, &trace);
 -      snprint_stack_trace(buf, sz, &trace, indent);
 +      nr_entries = stack_depot_fetch(stack, &entries);
 +      stack_trace_snprint(buf, sz, entries, nr_entries, indent);
  }
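
Both converted call sites follow the same two-step recipe for the reworked stack-depot API: capture with stack_trace_save(), deduplicate into a handle with stack_depot_save(), and later expand the handle with stack_depot_fetch() before formatting. A condensed kernel-C restatement of that round trip (builds only in-tree, assuming CONFIG_STACKDEPOT):

#include <linux/kernel.h>
#include <linux/stackdepot.h>
#include <linux/stacktrace.h>

#define DEPTH 8

static depot_stack_handle_t capture_stack(void)
{
        unsigned long entries[DEPTH];
        unsigned int n;

        /* skip one frame so this helper itself is not recorded */
        n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
        return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
}

static void format_stack(depot_stack_handle_t handle, char *buf, int sz)
{
        unsigned long *entries;
        unsigned int nr_entries;

        /* the depot owns the storage; fetch only returns a pointer */
        nr_entries = stack_depot_fetch(handle, &entries);
        stack_trace_snprint(buf, sz, entries, nr_entries, 0);
}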
  
  static void init_intel_runtime_pm_wakeref(struct drm_i915_private *i915)
@@@ -147,7 -162,7 +151,7 @@@ static void cancel_intel_runtime_pm_wak
                 rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
                char *buf;
  
-               buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+               buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
                if (!buf)
                        return;
  
@@@ -183,7 -198,7 +187,7 @@@ __print_intel_runtime_pm_wakeref(struc
        unsigned long i;
        char *buf;
  
-       buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+       buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
        if (!buf)
                return;
  
@@@ -267,7 -282,9 +271,9 @@@ void print_intel_runtime_pm_wakeref(str
                if (dbg.count <= alloc)
                        break;
  
-               s = krealloc(dbg.owners, dbg.count * sizeof(*s), GFP_KERNEL);
+               s = krealloc(dbg.owners,
+                            dbg.count * sizeof(*s),
+                            GFP_NOWAIT | __GFP_NOWARN);
                if (!s)
                        goto out;
  
@@@ -554,7 -571,7 +560,7 @@@ static void hsw_wait_for_power_well_ena
        int pw_idx = power_well->desc->hsw.idx;
  
        /* Timeout for PW1:10 us, AUX:not specified, other PWs:20 us. */
-       WARN_ON(intel_wait_for_register(dev_priv,
+       WARN_ON(intel_wait_for_register(&dev_priv->uncore,
                                        regs->driver,
                                        HSW_PWR_WELL_CTL_STATE(pw_idx),
                                        HSW_PWR_WELL_CTL_STATE(pw_idx),
@@@ -609,7 -626,7 +615,7 @@@ static void gen9_wait_for_power_well_fu
                                           enum skl_power_gate pg)
  {
        /* Timeout 5us for PG#0, for other PGs 1us */
-       WARN_ON(intel_wait_for_register(dev_priv, SKL_FUSE_STATUS,
+       WARN_ON(intel_wait_for_register(&dev_priv->uncore, SKL_FUSE_STATUS,
                                        SKL_FUSE_PG_DIST_STATUS(pg),
                                        SKL_FUSE_PG_DIST_STATUS(pg), 1));
  }
@@@ -1510,7 -1527,7 +1516,7 @@@ static void assert_chv_phy_status(struc
         * The PHY may be busy with some initial calibration and whatnot,
         * so the power state can take a while to actually change.
         */
-       if (intel_wait_for_register(dev_priv,
+       if (intel_wait_for_register(&dev_priv->uncore,
                                    DISPLAY_PHY_STATUS,
                                    phy_status_mask,
                                    phy_status,
@@@ -1545,7 -1562,7 +1551,7 @@@ static void chv_dpio_cmn_power_well_ena
        vlv_set_power_well(dev_priv, power_well, true);
  
        /* Poll for phypwrgood signal */
-       if (intel_wait_for_register(dev_priv,
+       if (intel_wait_for_register(&dev_priv->uncore,
                                    DISPLAY_PHY_STATUS,
                                    PHY_POWERGOOD(phy),
                                    PHY_POWERGOOD(phy),
@@@ -1749,7 -1766,7 +1755,7 @@@ static bool chv_pipe_power_well_enabled
  
        mutex_lock(&dev_priv->pcu_lock);
  
-       state = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe);
+       state = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe);
        /*
         * We only ever set the power-on and power-gate states, anything
         * else is unexpected.
         * A transient state at this point would mean some unexpected party
         * is poking at the power controls too.
         */
-       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSC_MASK(pipe);
+       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSC_MASK(pipe);
        WARN_ON(ctrl << 16 != state);
  
        mutex_unlock(&dev_priv->pcu_lock);
@@@ -1782,20 -1799,20 +1788,20 @@@ static void chv_set_pipe_power_well(str
        mutex_lock(&dev_priv->pcu_lock);
  
  #define COND \
-       ((vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ) & DP_SSS_MASK(pipe)) == state)
+       ((vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state)
  
        if (COND)
                goto out;
  
-       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ);
+       ctrl = vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM);
        ctrl &= ~DP_SSC_MASK(pipe);
        ctrl |= enable ? DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe);
-       vlv_punit_write(dev_priv, PUNIT_REG_DSPFREQ, ctrl);
+       vlv_punit_write(dev_priv, PUNIT_REG_DSPSSPM, ctrl);
  
        if (wait_for(COND, 100))
                DRM_ERROR("timeout setting power well state %08x (%08x)\n",
                          state,
-                         vlv_punit_read(dev_priv, PUNIT_REG_DSPFREQ));
+                         vlv_punit_read(dev_priv, PUNIT_REG_DSPSSPM));
  
  #undef COND
  
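
The COND dance above is the usual punit sequence: read-modify-write the control field, then poll until the status field reflects the request. A runnable toy version of the field update follows; the two-bit field encodings below are invented for the sketch, the real values live in the i915 punit register definitions:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical per-pipe two-bit power-state field, two bits per pipe. */
#define SSC_SHIFT(pipe)		((pipe) * 2)
#define SSC_MASK(pipe)		(0x3u << SSC_SHIFT(pipe))
#define SSC_PWR_ON(pipe)	(0x0u << SSC_SHIFT(pipe))
#define SSC_PWR_GATE(pipe)	(0x3u << SSC_SHIFT(pipe))

static uint32_t set_pipe_power_state(uint32_t ctrl, int pipe, int enable)
{
	/* Clear the pipe's field, then set the requested state. */
	ctrl &= ~SSC_MASK(pipe);
	ctrl |= enable ? SSC_PWR_ON(pipe) : SSC_PWR_GATE(pipe);
	return ctrl;
}

int main(void)
{
	uint32_t ctrl = 0xffffffff;

	ctrl = set_pipe_power_state(ctrl, 1, 1);
	printf("ctrl = %08x\n", ctrl);	/* pipe 1 field cleared to PWR_ON */
	return 0;
}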
@@@ -3431,7 -3448,7 +3437,7 @@@ int intel_power_domains_init(struct drm
         * The enabling order will be from lower to higher indexed wells,
         * the disabling order is reversed.
         */
-       if (IS_ICELAKE(dev_priv)) {
+       if (IS_GEN(dev_priv, 11)) {
                err = set_power_wells(power_domains, icl_power_wells);
        } else if (IS_CANNONLAKE(dev_priv)) {
                err = set_power_wells(power_domains, cnl_power_wells);
@@@ -3565,7 -3582,11 +3571,11 @@@ static void icl_dbuf_enable(struct drm_
            !(I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
                DRM_ERROR("DBuf power enable timeout\n");
        else
-               dev_priv->wm.skl_hw.ddb.enabled_slices = 2;
+               /*
+                * FIXME: for now pretend that we only have 1 slice, see
+                * intel_enabled_dbuf_slices_num().
+                */
+               dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
  }
  
  static void icl_dbuf_disable(struct drm_i915_private *dev_priv)
            (I915_READ(DBUF_CTL_S2) & DBUF_POWER_STATE))
                DRM_ERROR("DBuf power disable timeout!\n");
        else
-               dev_priv->wm.skl_hw.ddb.enabled_slices = 0;
+               /*
+                * FIXME: for now pretend that the first slice is always
+                * enabled, see intel_enabled_dbuf_slices_num().
+                */
+               dev_priv->wm.skl_hw.ddb.enabled_slices = 1;
  }
  
  static void icl_mbus_init(struct drm_i915_private *dev_priv)
@@@ -3641,7 -3666,7 +3655,7 @@@ static void skl_display_core_init(struc
  
        mutex_unlock(&power_domains->lock);
  
-       skl_init_cdclk(dev_priv);
+       intel_cdclk_init(dev_priv);
  
        gen9_dbuf_enable(dev_priv);
  
@@@ -3658,7 -3683,7 +3672,7 @@@ static void skl_display_core_uninit(str
  
        gen9_dbuf_disable(dev_priv);
  
-       skl_uninit_cdclk(dev_priv);
+       intel_cdclk_uninit(dev_priv);
  
        /* The spec doesn't call for removing the reset handshake flag */
        /* disable PG1 and Misc I/O */
@@@ -3703,7 -3728,7 +3717,7 @@@ void bxt_display_core_init(struct drm_i
  
        mutex_unlock(&power_domains->lock);
  
-       bxt_init_cdclk(dev_priv);
+       intel_cdclk_init(dev_priv);
  
        gen9_dbuf_enable(dev_priv);
  
@@@ -3720,7 -3745,7 +3734,7 @@@ void bxt_display_core_uninit(struct drm
  
        gen9_dbuf_disable(dev_priv);
  
-       bxt_uninit_cdclk(dev_priv);
+       intel_cdclk_uninit(dev_priv);
  
        /* The spec doesn't call for removing the reset handshake flag */
  
@@@ -3762,7 -3787,7 +3776,7 @@@ static void cnl_display_core_init(struc
        mutex_unlock(&power_domains->lock);
  
        /* 5. Enable CD clock */
-       cnl_init_cdclk(dev_priv);
+       intel_cdclk_init(dev_priv);
  
        /* 6. Enable DBUF */
        gen9_dbuf_enable(dev_priv);
@@@ -3784,7 -3809,7 +3798,7 @@@ static void cnl_display_core_uninit(str
        gen9_dbuf_disable(dev_priv);
  
        /* 3. Disable CD clock */
-       cnl_uninit_cdclk(dev_priv);
+       intel_cdclk_uninit(dev_priv);
  
        /*
         * 4. Disable Power Well 1 (PG1).
@@@ -3826,7 -3851,7 +3840,7 @@@ void icl_display_core_init(struct drm_i
        mutex_unlock(&power_domains->lock);
  
        /* 5. Enable CDCLK. */
-       icl_init_cdclk(dev_priv);
+       intel_cdclk_init(dev_priv);
  
        /* 6. Enable DBUF. */
        icl_dbuf_enable(dev_priv);
@@@ -3851,7 -3876,7 +3865,7 @@@ void icl_display_core_uninit(struct drm
        icl_dbuf_disable(dev_priv);
  
        /* 3. Disable CD clock */
-       icl_uninit_cdclk(dev_priv);
+       intel_cdclk_uninit(dev_priv);
  
        /*
         * 4. Disable Power Well 1 (PG1).
@@@ -3982,6 -4007,36 +3996,36 @@@ static void vlv_cmnlane_wa(struct drm_i
        cmn->desc->ops->disable(dev_priv, cmn);
  }
  
+ static bool vlv_punit_is_power_gated(struct drm_i915_private *dev_priv, u32 reg0)
+ {
+       bool ret;
+
+       mutex_lock(&dev_priv->pcu_lock);
+       ret = (vlv_punit_read(dev_priv, reg0) & SSPM0_SSC_MASK) == SSPM0_SSC_PWR_GATE;
+       mutex_unlock(&dev_priv->pcu_lock);
+
+       return ret;
+ }
+
+ static void assert_ved_power_gated(struct drm_i915_private *dev_priv)
+ {
+       WARN(!vlv_punit_is_power_gated(dev_priv, PUNIT_REG_VEDSSPM0),
+            "VED not power gated\n");
+ }
+
+ static void assert_isp_power_gated(struct drm_i915_private *dev_priv)
+ {
+       static const struct pci_device_id isp_ids[] = {
+               {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0f38)},
+               {PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x22b8)},
+               {}
+       };
+
+       WARN(!pci_dev_present(isp_ids) &&
+            !vlv_punit_is_power_gated(dev_priv, PUNIT_REG_ISPSSPM0),
+            "ISP not power gated\n");
+ }
+
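
assert_isp_power_gated() skips the warning when an ISP PCI function is exposed, using a small NULL-terminated ID table. A userspace stand-in for that table walk; the matching loop substitutes for pci_dev_present(), and the two device IDs are the ones from the hunk above:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct pci_id {
	uint16_t vendor;
	uint16_t device;
};

/* Walk a {0, 0}-terminated table, like pci_dev_present() does. */
static bool id_table_matches(const struct pci_id *table,
			     uint16_t vendor, uint16_t device)
{
	for (; table->vendor || table->device; table++)
		if (table->vendor == vendor && table->device == device)
			return true;
	return false;
}

int main(void)
{
	static const struct pci_id isp_ids[] = {
		{ 0x8086, 0x0f38 },	/* ISP ID from the diff above */
		{ 0x8086, 0x22b8 },	/* ISP ID from the diff above */
		{ 0, 0 }
	};

	printf("%d\n", id_table_matches(isp_ids, 0x8086, 0x22b8));	/* 1 */
	return 0;
}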
  static void intel_power_domains_verify_state(struct drm_i915_private *dev_priv);
  
  /**
@@@ -4006,7 -4061,7 +4050,7 @@@ void intel_power_domains_init_hw(struc
  
        power_domains->initializing = true;
  
-       if (IS_ICELAKE(i915)) {
+       if (INTEL_GEN(i915) >= 11) {
                icl_display_core_init(i915, resume);
        } else if (IS_CANNONLAKE(i915)) {
                cnl_display_core_init(i915, resume);
                mutex_lock(&power_domains->lock);
                chv_phy_control_init(i915);
                mutex_unlock(&power_domains->lock);
+               assert_isp_power_gated(i915);
        } else if (IS_VALLEYVIEW(i915)) {
                mutex_lock(&power_domains->lock);
                vlv_cmnlane_wa(i915);
                mutex_unlock(&power_domains->lock);
+               assert_ved_power_gated(i915);
+               assert_isp_power_gated(i915);
        } else if (IS_IVYBRIDGE(i915) || INTEL_GEN(i915) >= 7) {
                intel_pch_reset_handshake(i915, !HAS_PCH_NOP(i915));
        }
@@@ -4151,7 -4209,7 +4198,7 @@@ void intel_power_domains_suspend(struc
                intel_power_domains_verify_state(i915);
        }
  
-       if (IS_ICELAKE(i915))
+       if (INTEL_GEN(i915) >= 11)
                icl_display_core_uninit(i915);
        else if (IS_CANNONLAKE(i915))
                cnl_display_core_uninit(i915);
index 54011df8c2e807d7984dc7985764525899ff49d7,311a20c942ebbc5338d95c39ca08002a973e95fc..9cc1d678674f2dc3b12894459a00c2c54605f08a
@@@ -71,7 -71,7 +71,7 @@@ static void ipu_crtc_disable_planes(str
        if (disable_partial)
                ipu_plane_disable(ipu_crtc->plane[1], true);
        if (disable_full)
 -              ipu_plane_disable(ipu_crtc->plane[0], false);
 +              ipu_plane_disable(ipu_crtc->plane[0], true);
  }
  
  static void ipu_crtc_atomic_disable(struct drm_crtc *crtc,
@@@ -295,7 -295,7 +295,7 @@@ static void ipu_crtc_mode_set_nofb(stru
        sig_cfg.enable_pol = !(imx_crtc_state->bus_flags & DRM_BUS_FLAG_DE_LOW);
        /* Default to driving pixel data on negative clock edges */
        sig_cfg.clk_pol = !!(imx_crtc_state->bus_flags &
-                            DRM_BUS_FLAG_PIXDATA_POSEDGE);
+                            DRM_BUS_FLAG_PIXDATA_DRIVE_POSEDGE);
        sig_cfg.bus_format = imx_crtc_state->bus_format;
        sig_cfg.v_to_h_sync = 0;
        sig_cfg.hsync_pin = imx_crtc_state->di_hsync_pin;
index 1a01669b159ab78c0c9b849616bcc7d852738b77,988416fb8a0bdb4135566db8a60190fb7b231225..2845fceb2fbd8f3c2eebe188e2f8d97403da534e
@@@ -49,8 -49,9 +49,8 @@@ static void ttm_bo_global_kobj_release(
   * ttm_global_mutex - protecting the global BO state
   */
  DEFINE_MUTEX(ttm_global_mutex);
 -struct ttm_bo_global ttm_bo_glob = {
 -      .use_count = 0
 -};
 +unsigned ttm_bo_glob_use_count;
 +struct ttm_bo_global ttm_bo_glob;
  
  static struct attribute ttm_bo_count = {
        .name = "bo_count",
@@@ -875,10 -876,8 +875,10 @@@ static int ttm_bo_add_move_fence(struc
                reservation_object_add_shared_fence(bo->resv, fence);
  
                ret = reservation_object_reserve_shared(bo->resv, 1);
 -              if (unlikely(ret))
 +              if (unlikely(ret)) {
 +                      dma_fence_put(fence);
                        return ret;
 +              }
  
                dma_fence_put(bo->moving);
                bo->moving = fence;
@@@ -1530,13 -1529,12 +1530,13 @@@ static void ttm_bo_global_release(void
        struct ttm_bo_global *glob = &ttm_bo_glob;
  
        mutex_lock(&ttm_global_mutex);
 -      if (--glob->use_count > 0)
 +      if (--ttm_bo_glob_use_count > 0)
                goto out;
  
        kobject_del(&glob->kobj);
        kobject_put(&glob->kobj);
        ttm_mem_global_release(&ttm_mem_glob);
 +      memset(glob, 0, sizeof(*glob));
  out:
        mutex_unlock(&ttm_global_mutex);
  }
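
The use count moves out of struct ttm_bo_global so the struct itself can be zeroed on the final release. A compact userspace model of the same first-init/last-release scheme, with a pthread mutex standing in for ttm_global_mutex (build with -pthread; all names here are illustrative):

#include <pthread.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t global_mutex = PTHREAD_MUTEX_INITIALIZER;
static unsigned int global_use_count;	/* kept outside the state struct */
static struct { int initialized; } global_state;

static int global_init(void)
{
	pthread_mutex_lock(&global_mutex);
	if (++global_use_count == 1)
		global_state.initialized = 1;	/* first user does the setup */
	pthread_mutex_unlock(&global_mutex);
	return 0;
}

static void global_release(void)
{
	pthread_mutex_lock(&global_mutex);
	if (--global_use_count == 0)
		memset(&global_state, 0, sizeof(global_state));	/* last user */
	pthread_mutex_unlock(&global_mutex);
}

int main(void)
{
	global_init();
	global_init();
	global_release();
	printf("still initialized: %d\n", global_state.initialized);	/* 1 */
	global_release();
	printf("after last put:    %d\n", global_state.initialized);	/* 0 */
	return 0;
}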
@@@ -1548,7 -1546,7 +1548,7 @@@ static int ttm_bo_global_init(void
        unsigned i;
  
        mutex_lock(&ttm_global_mutex);
 -      if (++glob->use_count > 1)
 +      if (++ttm_bo_glob_use_count > 1)
                goto out;
  
        ret = ttm_mem_global_init(&ttm_mem_glob);
@@@ -1626,7 -1624,6 +1626,6 @@@ EXPORT_SYMBOL(ttm_bo_device_release)
  int ttm_bo_device_init(struct ttm_bo_device *bdev,
                       struct ttm_bo_driver *driver,
                       struct address_space *mapping,
-                      uint64_t file_page_offset,
                       bool need_dma32)
  {
        struct ttm_bo_global *glob = &ttm_bo_glob;
        if (unlikely(ret != 0))
                goto out_no_sys;
  
-       drm_vma_offset_manager_init(&bdev->vma_manager, file_page_offset,
-                                   0x10000000);
+       drm_vma_offset_manager_init(&bdev->vma_manager,
+                                   DRM_FILE_PAGE_OFFSET_START,
+                                   DRM_FILE_PAGE_OFFSET_SIZE);
        INIT_DELAYED_WORK(&bdev->wq, ttm_bo_delayed_workqueue);
        INIT_LIST_HEAD(&bdev->ddestroy);
        bdev->dev_mapping = mapping;
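
With the DRM_FILE_PAGE_OFFSET handling unified, every TTM driver places its GEM mmap offsets in the same page range instead of passing a per-driver file_page_offset. The sketch below only illustrates the idea behind such constants, assuming 4 KiB pages and a 64-bit unsigned long; the authoritative macro definitions are in the DRM headers, not here:

#include <stdio.h>

#define PAGE_SHIFT		12
/* Fake offsets begin just above the 32-bit byte range, in pages. */
#define FILE_PAGE_OFFSET_START	((0xFFFFFFFFUL >> PAGE_SHIFT) + 1)

int main(void)
{
	/* First valid fake offset, as the byte address handed to mmap(2). */
	unsigned long byte_off = FILE_PAGE_OFFSET_START << PAGE_SHIFT;

	printf("fake offsets start at page %lu (byte 0x%lx)\n",
	       FILE_PAGE_OFFSET_START, byte_off);
	return 0;
}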
index 9a0909decb3668ee1e56a729c7664f3f01a33a72,699fed9e08ee180a925cf1da69ded5ec4fe2def5..8617958b7ae6b7759e27c6e1ff3235e53060f0a1
@@@ -81,7 -81,7 +81,7 @@@ static void ttm_mem_zone_kobj_release(s
        struct ttm_mem_zone *zone =
                container_of(kobj, struct ttm_mem_zone, kobj);
  
-       pr_info("Zone %7s: Used memory at exit: %llu kiB\n",
+       pr_info("Zone %7s: Used memory at exit: %llu KiB\n",
                zone->name, (unsigned long long)zone->used_mem >> 10);
        kfree(zone);
  }
@@@ -448,7 -448,7 +448,7 @@@ int ttm_mem_global_init(struct ttm_mem_
  #endif
        for (i = 0; i < glob->num_zones; ++i) {
                zone = glob->zones[i];
-               pr_info("Zone %7s: Available graphics memory: %llu kiB\n",
+               pr_info("Zone %7s: Available graphics memory: %llu KiB\n",
                        zone->name, (unsigned long long)zone->max_mem >> 10);
        }
        ttm_page_alloc_init(glob, glob->zone_kernel->max_mem/(2*PAGE_SIZE));
@@@ -461,8 -461,8 +461,8 @@@ out_no_zone
  
  void ttm_mem_global_release(struct ttm_mem_global *glob)
  {
 -      unsigned int i;
        struct ttm_mem_zone *zone;
 +      unsigned int i;
  
        /* let the page allocator first stop the shrink work. */
        ttm_page_alloc_fini();
                zone = glob->zones[i];
                kobject_del(&zone->kobj);
                kobject_put(&zone->kobj);
 -                      }
 +      }
        kobject_del(&glob->kobj);
        kobject_put(&glob->kobj);
 +      memset(glob, 0, sizeof(*glob));
  }
  
  static void ttm_check_swapping(struct ttm_mem_global *glob)
@@@ -523,7 -522,7 +523,7 @@@ static void ttm_mem_global_free_zone(st
  void ttm_mem_global_free(struct ttm_mem_global *glob,
                         uint64_t amount)
  {
-       return ttm_mem_global_free_zone(glob, NULL, amount);
+       return ttm_mem_global_free_zone(glob, glob->zone_kernel, amount);
  }
  EXPORT_SYMBOL(ttm_mem_global_free);
  
@@@ -622,10 -621,10 +622,10 @@@ int ttm_mem_global_alloc(struct ttm_mem
  {
        /**
         * Normal allocations of kernel memory are registered in
-        * all zones.
+        * the kernel zone.
         */
  
-       return ttm_mem_global_alloc_zone(glob, NULL, memory, ctx);
+       return ttm_mem_global_alloc_zone(glob, glob->zone_kernel, memory, ctx);
  }
  EXPORT_SYMBOL(ttm_mem_global_alloc);
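
Kernel allocations are now charged to the kernel zone only, rather than to every zone as before. A toy model of per-zone accounting, with the struct and helpers invented for the sketch:

#include <stdint.h>
#include <stdio.h>

struct mem_zone {
	const char *name;
	uint64_t max_mem;
	uint64_t used_mem;
};

static int zone_alloc(struct mem_zone *zone, uint64_t amount)
{
	if (zone->used_mem + amount > zone->max_mem)
		return -1;	/* over this zone's limit */
	zone->used_mem += amount;
	return 0;
}

static void zone_free(struct mem_zone *zone, uint64_t amount)
{
	zone->used_mem -= amount;
}

int main(void)
{
	struct mem_zone kernel = { "kernel", 1 << 20, 0 };
	struct mem_zone dma32  = { "dma32",  1 << 20, 0 };

	/* Charged to the kernel zone only; dma32 stays untouched. */
	zone_alloc(&kernel, 4096);
	printf("kernel used: %llu, dma32 used: %llu\n",
	       (unsigned long long)kernel.used_mem,
	       (unsigned long long)dma32.used_mem);
	zone_free(&kernel, 4096);
	return 0;
}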
  
index 0000000000000000000000000000000000000000,1f4182e2e980ad5cf1617d360125ca9de7efa68e..d6ab955c0768c5c1f450b7f5eee56a4af5a9f038
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,15 +1,16 @@@
++# SPDX-License-Identifier: GPL-2.0
+ config DRM_VBOXVIDEO
+       tristate "Virtual Box Graphics Card"
+       depends on DRM && X86 && PCI
+       select DRM_KMS_HELPER
+       select DRM_TTM
+       select GENERIC_ALLOCATOR
+       help
+         This is a KMS driver for the virtual graphics card used in
+         VirtualBox virtual machines.
+         Although it is possible to build this driver into the kernel,
+         it is advised to build it as a module, so that it can
+         be updated independently of the kernel. Select M to build this
+         driver as a module and add support for these devices via drm/kms
+         interfaces.
index 0000000000000000000000000000000000000000,620a6e38f71f310e1d95d5d574bc76fa16da4842..58cea131470e1eda5dcf48b14bfa4cd8a8730297
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,940 +1,939 @@@
 -              vbox_write_ioport(
 -                      VBE_DISPI_INDEX_X_OFFSET,
+ // SPDX-License-Identifier: MIT
+ /*
+  * Copyright (C) 2013-2017 Oracle Corporation
+  * This file is based on ast_mode.c
+  * Copyright 2012 Red Hat Inc.
+  * Parts based on xf86-video-ast
+  * Copyright (c) 2005 ASPEED Technology Inc.
+  * Authors: Dave Airlie <airlied@redhat.com>
+  *          Michael Thayer <michael.thayer@oracle.com>,
+  *          Hans de Goede <hdegoede@redhat.com>
+  */
+ #include <linux/export.h>
+ #include <drm/drm_atomic.h>
+ #include <drm/drm_atomic_helper.h>
+ #include <drm/drm_fourcc.h>
+ #include <drm/drm_plane_helper.h>
+ #include <drm/drm_probe_helper.h>
+ #include <drm/drm_vblank.h>
+ #include "hgsmi_channels.h"
+ #include "vbox_drv.h"
+ #include "vboxvideo.h"
+ /*
+  * Set a graphics mode.  Poke any required values into registers, do an HGSMI
+  * mode set and tell the host we support advanced graphics functions.
+  */
+ static void vbox_do_modeset(struct drm_crtc *crtc)
+ {
+       struct drm_framebuffer *fb = crtc->primary->state->fb;
+       struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
+       struct vbox_private *vbox;
+       int width, height, bpp, pitch;
+       u16 flags;
+       s32 x_offset, y_offset;
+       vbox = crtc->dev->dev_private;
+       width = vbox_crtc->width ? vbox_crtc->width : 640;
+       height = vbox_crtc->height ? vbox_crtc->height : 480;
+       bpp = fb ? fb->format->cpp[0] * 8 : 32;
+       pitch = fb ? fb->pitches[0] : width * bpp / 8;
+       x_offset = vbox->single_framebuffer ? vbox_crtc->x : vbox_crtc->x_hint;
+       y_offset = vbox->single_framebuffer ? vbox_crtc->y : vbox_crtc->y_hint;
+       /*
+        * This is the old way of setting graphics modes.  It assumed one screen
+        * and a frame-buffer at the start of video RAM.  On older versions of
+        * VirtualBox, certain parts of the code still assume that the first
+        * screen is programmed this way, so try to fake it.
+        */
+       if (vbox_crtc->crtc_id == 0 && fb &&
+           vbox_crtc->fb_offset / pitch < 0xffff - crtc->y &&
+           vbox_crtc->fb_offset % (bpp / 8) == 0) {
+               vbox_write_ioport(VBE_DISPI_INDEX_XRES, width);
+               vbox_write_ioport(VBE_DISPI_INDEX_YRES, height);
+               vbox_write_ioport(VBE_DISPI_INDEX_VIRT_WIDTH, pitch * 8 / bpp);
+               vbox_write_ioport(VBE_DISPI_INDEX_BPP, bpp);
+               vbox_write_ioport(VBE_DISPI_INDEX_ENABLE, VBE_DISPI_ENABLED);
++              vbox_write_ioport(VBE_DISPI_INDEX_X_OFFSET,
+                       vbox_crtc->fb_offset % pitch / bpp * 8 + vbox_crtc->x);
+               vbox_write_ioport(VBE_DISPI_INDEX_Y_OFFSET,
+                                 vbox_crtc->fb_offset / pitch + vbox_crtc->y);
+       }
+       flags = VBVA_SCREEN_F_ACTIVE;
+       flags |= (fb && crtc->state->enable) ? 0 : VBVA_SCREEN_F_BLANK;
+       flags |= vbox_crtc->disconnected ? VBVA_SCREEN_F_DISABLED : 0;
+       hgsmi_process_display_info(vbox->guest_pool, vbox_crtc->crtc_id,
+                                  x_offset, y_offset,
+                                  vbox_crtc->x * bpp / 8 +
+                                                       vbox_crtc->y * pitch,
+                                  pitch, width, height, bpp, flags);
+ }
+ static int vbox_set_view(struct drm_crtc *crtc)
+ {
+       struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
+       struct vbox_private *vbox = crtc->dev->dev_private;
+       struct vbva_infoview *p;
+       /*
+        * Tell the host about the view.  This design originally targeted the
+        * Windows XP driver architecture and assumed that each screen would
+        * have a dedicated frame buffer with the command buffer following it,
+        * the whole being a "view".  The host works out which screen a command
+        * buffer belongs to by checking whether it is in the first view, then
+        * whether it is in the second and so on.  The first match wins.  We
+        * cheat around this by making the first view be the managed memory
+        * plus the first command buffer, the second the same plus the second
+        * buffer and so on.
+        */
+       p = hgsmi_buffer_alloc(vbox->guest_pool, sizeof(*p),
+                              HGSMI_CH_VBVA, VBVA_INFO_VIEW);
+       if (!p)
+               return -ENOMEM;
+       p->view_index = vbox_crtc->crtc_id;
+       p->view_offset = vbox_crtc->fb_offset;
+       p->view_size = vbox->available_vram_size - vbox_crtc->fb_offset +
+                      vbox_crtc->crtc_id * VBVA_MIN_BUFFER_SIZE;
+       p->max_screen_size = vbox->available_vram_size - vbox_crtc->fb_offset;
+       hgsmi_buffer_submit(vbox->guest_pool, p);
+       hgsmi_buffer_free(vbox->guest_pool, p);
+       return 0;
+ }
+ /*
+  * Try to map the layout of virtual screens to the range of the input device.
+  * Return true if we need to re-set the crtc modes due to screen offset
+  * changes.
+  */
+ static bool vbox_set_up_input_mapping(struct vbox_private *vbox)
+ {
+       struct drm_crtc *crtci;
+       struct drm_connector *connectori;
+       struct drm_framebuffer *fb, *fb1 = NULL;
+       bool single_framebuffer = true;
+       bool old_single_framebuffer = vbox->single_framebuffer;
+       u16 width = 0, height = 0;
+       /*
+        * Are we using an X.Org-style single large frame-buffer for all crtcs?
+        * If so then screen layout can be deduced from the crtc offsets.
+        * Same fall-back if this is the fbdev frame-buffer.
+        */
+       list_for_each_entry(crtci, &vbox->ddev.mode_config.crtc_list, head) {
+               fb = crtci->primary->state->fb;
+               if (!fb)
+                       continue;
+               if (!fb1) {
+                       fb1 = fb;
+                       if (to_vbox_framebuffer(fb1) == &vbox->afb)
+                               break;
+               } else if (fb != fb1) {
+                       single_framebuffer = false;
+               }
+       }
+       if (!fb1)
+               return false;
+       if (single_framebuffer) {
+               vbox->single_framebuffer = true;
+               vbox->input_mapping_width = fb1->width;
+               vbox->input_mapping_height = fb1->height;
+               return old_single_framebuffer != vbox->single_framebuffer;
+       }
+       /* Otherwise calculate the total span of all screens. */
+       list_for_each_entry(connectori, &vbox->ddev.mode_config.connector_list,
+                           head) {
+               struct vbox_connector *vbox_connector =
+                   to_vbox_connector(connectori);
+               struct vbox_crtc *vbox_crtc = vbox_connector->vbox_crtc;
+               width = max_t(u16, width, vbox_crtc->x_hint +
+                                         vbox_connector->mode_hint.width);
+               height = max_t(u16, height, vbox_crtc->y_hint +
+                                           vbox_connector->mode_hint.height);
+       }
+       vbox->single_framebuffer = false;
+       vbox->input_mapping_width = width;
+       vbox->input_mapping_height = height;
+       return old_single_framebuffer != vbox->single_framebuffer;
+ }
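
When screens use separate frame-buffers, the input mapping has to span all of them, so the code takes the maximum of offset plus size on each axis. The same computation in a standalone form (the screen type is invented for the sketch):

#include <stdio.h>

struct screen {
	unsigned int x, y, w, h;
};

int main(void)
{
	static const struct screen screens[] = {
		{ 0, 0, 1024, 768 },
		{ 1024, 0, 1280, 1024 },
	};
	unsigned int i, width = 0, height = 0;

	for (i = 0; i < sizeof(screens) / sizeof(screens[0]); i++) {
		unsigned int right = screens[i].x + screens[i].w;
		unsigned int bottom = screens[i].y + screens[i].h;

		if (right > width)
			width = right;
		if (bottom > height)
			height = bottom;
	}
	printf("input mapping: %ux%u\n", width, height);	/* 2304x1024 */
	return 0;
}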
+ static void vbox_crtc_set_base_and_mode(struct drm_crtc *crtc,
+                                       struct drm_framebuffer *fb,
+                                       int x, int y)
+ {
+       struct vbox_bo *bo = gem_to_vbox_bo(to_vbox_framebuffer(fb)->obj);
+       struct vbox_private *vbox = crtc->dev->dev_private;
+       struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc);
+       bool needs_modeset = drm_atomic_crtc_needs_modeset(crtc->state);
+       mutex_lock(&vbox->hw_mutex);
+       if (crtc->state->enable) {
+               vbox_crtc->width = crtc->state->mode.hdisplay;
+               vbox_crtc->height = crtc->state->mode.vdisplay;
+       }
+       vbox_crtc->x = x;
+       vbox_crtc->y = y;
+       vbox_crtc->fb_offset = vbox_bo_gpu_offset(bo);
+       /* vbox_do_modeset() checks vbox->single_framebuffer so update it now */
+       if (needs_modeset && vbox_set_up_input_mapping(vbox)) {
+               struct drm_crtc *crtci;
+               list_for_each_entry(crtci, &vbox->ddev.mode_config.crtc_list,
+                                   head) {
+                       if (crtci == crtc)
+                               continue;
+                       vbox_do_modeset(crtci);
+               }
+       }
+       vbox_set_view(crtc);
+       vbox_do_modeset(crtc);
+       if (needs_modeset)
+               hgsmi_update_input_mapping(vbox->guest_pool, 0, 0,
+                                          vbox->input_mapping_width,
+                                          vbox->input_mapping_height);
+       mutex_unlock(&vbox->hw_mutex);
+ }
+ static void vbox_crtc_atomic_enable(struct drm_crtc *crtc,
+                                   struct drm_crtc_state *old_crtc_state)
+ {
+ }
+ static void vbox_crtc_atomic_disable(struct drm_crtc *crtc,
+                                    struct drm_crtc_state *old_crtc_state)
+ {
+ }
+ static void vbox_crtc_atomic_flush(struct drm_crtc *crtc,
+                                  struct drm_crtc_state *old_crtc_state)
+ {
+       struct drm_pending_vblank_event *event;
+       unsigned long flags;
+       if (crtc->state && crtc->state->event) {
+               event = crtc->state->event;
+               crtc->state->event = NULL;
+               spin_lock_irqsave(&crtc->dev->event_lock, flags);
+               drm_crtc_send_vblank_event(crtc, event);
+               spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+       }
+ }
+ static const struct drm_crtc_helper_funcs vbox_crtc_helper_funcs = {
+       .atomic_enable = vbox_crtc_atomic_enable,
+       .atomic_disable = vbox_crtc_atomic_disable,
+       .atomic_flush = vbox_crtc_atomic_flush,
+ };
+ static void vbox_crtc_destroy(struct drm_crtc *crtc)
+ {
+       drm_crtc_cleanup(crtc);
+       kfree(crtc);
+ }
+ static const struct drm_crtc_funcs vbox_crtc_funcs = {
+       .set_config = drm_atomic_helper_set_config,
+       .page_flip = drm_atomic_helper_page_flip,
+       /* .gamma_set = vbox_crtc_gamma_set, */
+       .destroy = vbox_crtc_destroy,
+       .reset = drm_atomic_helper_crtc_reset,
+       .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ };
+ static int vbox_primary_atomic_check(struct drm_plane *plane,
+                                    struct drm_plane_state *new_state)
+ {
+       struct drm_crtc_state *crtc_state = NULL;
+       if (new_state->crtc) {
+               crtc_state = drm_atomic_get_existing_crtc_state(
+                                           new_state->state, new_state->crtc);
+               if (WARN_ON(!crtc_state))
+                       return -EINVAL;
+       }
+       return drm_atomic_helper_check_plane_state(new_state, crtc_state,
+                                                  DRM_PLANE_HELPER_NO_SCALING,
+                                                  DRM_PLANE_HELPER_NO_SCALING,
+                                                  false, true);
+ }
+ static void vbox_primary_atomic_update(struct drm_plane *plane,
+                                      struct drm_plane_state *old_state)
+ {
+       struct drm_crtc *crtc = plane->state->crtc;
+       struct drm_framebuffer *fb = plane->state->fb;
+       vbox_crtc_set_base_and_mode(crtc, fb,
+                                   plane->state->src_x >> 16,
+                                   plane->state->src_y >> 16);
+ }
+ static void vbox_primary_atomic_disable(struct drm_plane *plane,
+                                       struct drm_plane_state *old_state)
+ {
+       struct drm_crtc *crtc = old_state->crtc;
+       /* vbox_do_modeset checks plane->state->fb and will disable if NULL */
+       vbox_crtc_set_base_and_mode(crtc, old_state->fb,
+                                   old_state->src_x >> 16,
+                                   old_state->src_y >> 16);
+ }
+ static int vbox_primary_prepare_fb(struct drm_plane *plane,
+                                  struct drm_plane_state *new_state)
+ {
+       struct vbox_bo *bo;
+       int ret;
+       if (!new_state->fb)
+               return 0;
+       bo = gem_to_vbox_bo(to_vbox_framebuffer(new_state->fb)->obj);
+       ret = vbox_bo_pin(bo, TTM_PL_FLAG_VRAM);
+       if (ret)
+               DRM_WARN("Error %d pinning new fb, out of video mem?\n", ret);
+       return ret;
+ }
+ static void vbox_primary_cleanup_fb(struct drm_plane *plane,
+                                   struct drm_plane_state *old_state)
+ {
+       struct vbox_bo *bo;
+       if (!old_state->fb)
+               return;
+       bo = gem_to_vbox_bo(to_vbox_framebuffer(old_state->fb)->obj);
+       vbox_bo_unpin(bo);
+ }
+ static int vbox_cursor_atomic_check(struct drm_plane *plane,
+                                   struct drm_plane_state *new_state)
+ {
+       struct drm_crtc_state *crtc_state = NULL;
+       u32 width = new_state->crtc_w;
+       u32 height = new_state->crtc_h;
+       int ret;
+       if (new_state->crtc) {
+               crtc_state = drm_atomic_get_existing_crtc_state(
+                                           new_state->state, new_state->crtc);
+               if (WARN_ON(!crtc_state))
+                       return -EINVAL;
+       }
+       ret = drm_atomic_helper_check_plane_state(new_state, crtc_state,
+                                                 DRM_PLANE_HELPER_NO_SCALING,
+                                                 DRM_PLANE_HELPER_NO_SCALING,
+                                                 true, true);
+       if (ret)
+               return ret;
+       if (!new_state->fb)
+               return 0;
+       if (width > VBOX_MAX_CURSOR_WIDTH || height > VBOX_MAX_CURSOR_HEIGHT ||
+           width == 0 || height == 0)
+               return -EINVAL;
+       return 0;
+ }
+ /*
+  * Copy the ARGB image and generate the mask, which is needed in case the host
+  * does not support ARGB cursors.  The mask is a 1BPP bitmap with the bit set
+  * if the corresponding alpha value in the ARGB image is greater than 0xF0.
+  */
+ static void copy_cursor_image(u8 *src, u8 *dst, u32 width, u32 height,
+                             size_t mask_size)
+ {
+       size_t line_size = (width + 7) / 8;
+       u32 i, j;
+       memcpy(dst + mask_size, src, width * height * 4);
+       for (i = 0; i < height; ++i)
+               for (j = 0; j < width; ++j)
+                       if (((u32 *)src)[i * width + j] > 0xf0000000)
+                               dst[i * line_size + j / 8] |= (0x80 >> (j % 8));
+ }
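
copy_cursor_image() derives a 1 bpp visibility mask from the alpha channel for hosts without ARGB cursor support. Here is the mask generation extracted into a runnable helper; only the test harness around it is invented:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* One mask bit per pixel, set when the ARGB alpha byte exceeds 0xF0;
 * each mask line is padded to whole bytes, as in the driver. */
static void build_cursor_mask(const uint32_t *argb, uint8_t *mask,
			      uint32_t width, uint32_t height)
{
	size_t line_size = (width + 7) / 8;
	uint32_t i, j;

	memset(mask, 0, line_size * height);
	for (i = 0; i < height; i++)
		for (j = 0; j < width; j++)
			if (argb[i * width + j] > 0xf0000000)
				mask[i * line_size + j / 8] |= 0x80 >> (j % 8);
}

int main(void)
{
	/* 2x2 cursor: opaque, transparent / transparent, opaque. */
	uint32_t argb[4] = { 0xffff0000, 0x00000000, 0x00000000, 0xff00ff00 };
	uint8_t mask[2];

	build_cursor_mask(argb, mask, 2, 2);
	printf("%02x %02x\n", mask[0], mask[1]);	/* 80 40 */
	return 0;
}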
+ static void vbox_cursor_atomic_update(struct drm_plane *plane,
+                                     struct drm_plane_state *old_state)
+ {
+       struct vbox_private *vbox =
+               container_of(plane->dev, struct vbox_private, ddev);
+       struct vbox_crtc *vbox_crtc = to_vbox_crtc(plane->state->crtc);
+       struct drm_framebuffer *fb = plane->state->fb;
+       struct vbox_bo *bo = gem_to_vbox_bo(to_vbox_framebuffer(fb)->obj);
+       u32 width = plane->state->crtc_w;
+       u32 height = plane->state->crtc_h;
+       size_t data_size, mask_size;
+       u32 flags;
+       u8 *src;
+       /*
+        * VirtualBox uses the host windowing system to draw the cursor, so
+        * moves are a no-op; we only need to upload new cursor sprites.
+        */
+       if (fb == old_state->fb)
+               return;
+       mutex_lock(&vbox->hw_mutex);
+       vbox_crtc->cursor_enabled = true;
+       /* pinning is done in prepare/cleanup framebuffer */
+       src = vbox_bo_kmap(bo);
+       if (IS_ERR(src)) {
+               mutex_unlock(&vbox->hw_mutex);
+               DRM_WARN("Could not kmap cursor bo, skipping update\n");
+               return;
+       }
+       /*
+        * The mask must be calculated based on the alpha
+        * channel, one bit per ARGB word, and must be 32-bit
+        * padded.
+        */
+       mask_size = ((width + 7) / 8 * height + 3) & ~3;
+       data_size = width * height * 4 + mask_size;
+       copy_cursor_image(src, vbox->cursor_data, width, height, mask_size);
+       vbox_bo_kunmap(bo);
+       flags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE |
+               VBOX_MOUSE_POINTER_ALPHA;
+       hgsmi_update_pointer_shape(vbox->guest_pool, flags,
+                                  min_t(u32, max(fb->hot_x, 0), width),
+                                  min_t(u32, max(fb->hot_y, 0), height),
+                                  width, height, vbox->cursor_data, data_size);
+       mutex_unlock(&vbox->hw_mutex);
+ }
+ static void vbox_cursor_atomic_disable(struct drm_plane *plane,
+                                      struct drm_plane_state *old_state)
+ {
+       struct vbox_private *vbox =
+               container_of(plane->dev, struct vbox_private, ddev);
+       struct vbox_crtc *vbox_crtc = to_vbox_crtc(old_state->crtc);
+       bool cursor_enabled = false;
+       struct drm_crtc *crtci;
+       mutex_lock(&vbox->hw_mutex);
+       vbox_crtc->cursor_enabled = false;
+       list_for_each_entry(crtci, &vbox->ddev.mode_config.crtc_list, head) {
+               if (to_vbox_crtc(crtci)->cursor_enabled)
+                       cursor_enabled = true;
+       }
+       if (!cursor_enabled)
+               hgsmi_update_pointer_shape(vbox->guest_pool, 0, 0, 0,
+                                          0, 0, NULL, 0);
+       mutex_unlock(&vbox->hw_mutex);
+ }
+ static int vbox_cursor_prepare_fb(struct drm_plane *plane,
+                                 struct drm_plane_state *new_state)
+ {
+       struct vbox_bo *bo;
+       if (!new_state->fb)
+               return 0;
+       bo = gem_to_vbox_bo(to_vbox_framebuffer(new_state->fb)->obj);
+       return vbox_bo_pin(bo, TTM_PL_FLAG_SYSTEM);
+ }
+ static void vbox_cursor_cleanup_fb(struct drm_plane *plane,
+                                  struct drm_plane_state *old_state)
+ {
+       struct vbox_bo *bo;
+       if (!plane->state->fb)
+               return;
+       bo = gem_to_vbox_bo(to_vbox_framebuffer(plane->state->fb)->obj);
+       vbox_bo_unpin(bo);
+ }
+ static const u32 vbox_cursor_plane_formats[] = {
+       DRM_FORMAT_ARGB8888,
+ };
+ static const struct drm_plane_helper_funcs vbox_cursor_helper_funcs = {
+       .atomic_check   = vbox_cursor_atomic_check,
+       .atomic_update  = vbox_cursor_atomic_update,
+       .atomic_disable = vbox_cursor_atomic_disable,
+       .prepare_fb     = vbox_cursor_prepare_fb,
+       .cleanup_fb     = vbox_cursor_cleanup_fb,
+ };
+ static const struct drm_plane_funcs vbox_cursor_plane_funcs = {
+       .update_plane   = drm_atomic_helper_update_plane,
+       .disable_plane  = drm_atomic_helper_disable_plane,
+       .destroy        = drm_primary_helper_destroy,
+       .reset          = drm_atomic_helper_plane_reset,
+       .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ };
+ static const u32 vbox_primary_plane_formats[] = {
+       DRM_FORMAT_XRGB8888,
+       DRM_FORMAT_ARGB8888,
+ };
+ static const struct drm_plane_helper_funcs vbox_primary_helper_funcs = {
+       .atomic_check = vbox_primary_atomic_check,
+       .atomic_update = vbox_primary_atomic_update,
+       .atomic_disable = vbox_primary_atomic_disable,
+       .prepare_fb = vbox_primary_prepare_fb,
+       .cleanup_fb = vbox_primary_cleanup_fb,
+ };
+ static const struct drm_plane_funcs vbox_primary_plane_funcs = {
+       .update_plane   = drm_atomic_helper_update_plane,
+       .disable_plane  = drm_atomic_helper_disable_plane,
+       .destroy        = drm_primary_helper_destroy,
+       .reset          = drm_atomic_helper_plane_reset,
+       .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ };
+ static struct drm_plane *vbox_create_plane(struct vbox_private *vbox,
+                                          unsigned int possible_crtcs,
+                                          enum drm_plane_type type)
+ {
+       const struct drm_plane_helper_funcs *helper_funcs = NULL;
+       const struct drm_plane_funcs *funcs;
+       struct drm_plane *plane;
+       const u32 *formats;
+       int num_formats;
+       int err;
+       if (type == DRM_PLANE_TYPE_PRIMARY) {
+               funcs = &vbox_primary_plane_funcs;
+               formats = vbox_primary_plane_formats;
+               helper_funcs = &vbox_primary_helper_funcs;
+               num_formats = ARRAY_SIZE(vbox_primary_plane_formats);
+       } else if (type == DRM_PLANE_TYPE_CURSOR) {
+               funcs = &vbox_cursor_plane_funcs;
+               formats = vbox_cursor_plane_formats;
+               helper_funcs = &vbox_cursor_helper_funcs;
+               num_formats = ARRAY_SIZE(vbox_cursor_plane_formats);
+       } else {
+               return ERR_PTR(-EINVAL);
+       }
+       plane = kzalloc(sizeof(*plane), GFP_KERNEL);
+       if (!plane)
+               return ERR_PTR(-ENOMEM);
+       err = drm_universal_plane_init(&vbox->ddev, plane, possible_crtcs,
+                                      funcs, formats, num_formats,
+                                      NULL, type, NULL);
+       if (err)
+               goto free_plane;
+       drm_plane_helper_add(plane, helper_funcs);
+       return plane;
+ free_plane:
+       kfree(plane);
+       return ERR_PTR(-EINVAL);
+ }
+ static struct vbox_crtc *vbox_crtc_init(struct drm_device *dev, unsigned int i)
+ {
+       struct vbox_private *vbox =
+               container_of(dev, struct vbox_private, ddev);
+       struct drm_plane *cursor = NULL;
+       struct vbox_crtc *vbox_crtc;
+       struct drm_plane *primary;
+       u32 caps = 0;
+       int ret;
+       ret = hgsmi_query_conf(vbox->guest_pool,
+                              VBOX_VBVA_CONF32_CURSOR_CAPABILITIES, &caps);
+       if (ret)
+               return ERR_PTR(ret);
+       vbox_crtc = kzalloc(sizeof(*vbox_crtc), GFP_KERNEL);
+       if (!vbox_crtc)
+               return ERR_PTR(-ENOMEM);
+       primary = vbox_create_plane(vbox, 1 << i, DRM_PLANE_TYPE_PRIMARY);
+       if (IS_ERR(primary)) {
+               ret = PTR_ERR(primary);
+               goto free_mem;
+       }
+       if ((caps & VBOX_VBVA_CURSOR_CAPABILITY_HARDWARE)) {
+               cursor = vbox_create_plane(vbox, 1 << i, DRM_PLANE_TYPE_CURSOR);
+               if (IS_ERR(cursor)) {
+                       ret = PTR_ERR(cursor);
+                       goto clean_primary;
+               }
+       } else {
+               DRM_WARN("VirtualBox host is too old, no cursor support\n");
+       }
+       vbox_crtc->crtc_id = i;
+       ret = drm_crtc_init_with_planes(dev, &vbox_crtc->base, primary, cursor,
+                                       &vbox_crtc_funcs, NULL);
+       if (ret)
+               goto clean_cursor;
+       drm_mode_crtc_set_gamma_size(&vbox_crtc->base, 256);
+       drm_crtc_helper_add(&vbox_crtc->base, &vbox_crtc_helper_funcs);
+       return vbox_crtc;
+ clean_cursor:
+       if (cursor) {
+               drm_plane_cleanup(cursor);
+               kfree(cursor);
+       }
+ clean_primary:
+       drm_plane_cleanup(primary);
+       kfree(primary);
+ free_mem:
+       kfree(vbox_crtc);
+       return ERR_PTR(ret);
+ }
+ static void vbox_encoder_destroy(struct drm_encoder *encoder)
+ {
+       drm_encoder_cleanup(encoder);
+       kfree(encoder);
+ }
+ static const struct drm_encoder_funcs vbox_enc_funcs = {
+       .destroy = vbox_encoder_destroy,
+ };
+ static struct drm_encoder *vbox_encoder_init(struct drm_device *dev,
+                                            unsigned int i)
+ {
+       struct vbox_encoder *vbox_encoder;
+       vbox_encoder = kzalloc(sizeof(*vbox_encoder), GFP_KERNEL);
+       if (!vbox_encoder)
+               return NULL;
+       drm_encoder_init(dev, &vbox_encoder->base, &vbox_enc_funcs,
+                        DRM_MODE_ENCODER_DAC, NULL);
+       vbox_encoder->base.possible_crtcs = 1 << i;
+       return &vbox_encoder->base;
+ }
+ /*
+  * Generate EDID data with a mode-unique serial number for the virtual
+  * monitor to try to persuade Unity that different modes correspond to
+  * different monitors and it should not try to force the same resolution on
+  * them.
+  */
+ static void vbox_set_edid(struct drm_connector *connector, int width,
+                         int height)
+ {
+       enum { EDID_SIZE = 128 };
+       unsigned char edid[EDID_SIZE] = {
+               0x00, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0x00, /* header */
+               0x58, 0x58,     /* manufacturer (VBX) */
+               0x00, 0x00,     /* product code */
+               0x00, 0x00, 0x00, 0x00, /* serial number goes here */
+               0x01,           /* week of manufacture */
+               0x00,           /* year of manufacture */
+               0x01, 0x03,     /* EDID version */
+               0x80,           /* capabilities - digital */
+               0x00,           /* horiz. res in cm, zero for projectors */
+               0x00,           /* vert. res in cm */
+               0x78,           /* display gamma (120 == 2.2). */
+               0xEE,           /* features (standby, suspend, off, RGB, std */
+                               /* colour space, preferred timing mode) */
+               0xEE, 0x91, 0xA3, 0x54, 0x4C, 0x99, 0x26, 0x0F, 0x50, 0x54,
+               /* chromaticity for standard colour space. */
+               0x00, 0x00, 0x00,       /* no default timings */
+               0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01,
+                   0x01, 0x01,
+               0x01, 0x01, 0x01, 0x01, /* no standard timings */
+               0x00, 0x00, 0x00, 0x06, 0x00, 0x00, 0x06, 0x00, 0x02, 0x02,
+                   0x02, 0x02,
+               /* descriptor block 1 goes below */
+               0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
+               /* descriptor block 2, monitor ranges */
+               0x00, 0x00, 0x00, 0xFD, 0x00,
+               0x00, 0xC8, 0x00, 0xC8, 0x64, 0x00, 0x0A, 0x20, 0x20, 0x20,
+                   0x20, 0x20,
+               /* 0-200Hz vertical, 0-200KHz horizontal, 1000MHz pixel clock */
+               0x20,
+               /* descriptor block 3, monitor name */
+               0x00, 0x00, 0x00, 0xFC, 0x00,
+               'V', 'B', 'O', 'X', ' ', 'm', 'o', 'n', 'i', 't', 'o', 'r',
+               '\n',
+               /* descriptor block 4: dummy data */
+               0x00, 0x00, 0x00, 0x10, 0x00,
+               0x0A, 0x20, 0x20, 0x20, 0x20, 0x20,
+               0x20, 0x20, 0x20, 0x20, 0x20, 0x20,
+               0x20,
+               0x00,           /* number of extensions */
+               0x00            /* checksum goes here */
+       };
+       int clock = (width + 6) * (height + 6) * 60 / 10000;
+       unsigned int i, sum = 0;
+       edid[12] = width & 0xff;
+       edid[13] = width >> 8;
+       edid[14] = height & 0xff;
+       edid[15] = height >> 8;
+       edid[54] = clock & 0xff;
+       edid[55] = clock >> 8;
+       edid[56] = width & 0xff;
+       edid[58] = (width >> 4) & 0xf0;
+       edid[59] = height & 0xff;
+       edid[61] = (height >> 4) & 0xf0;
+       for (i = 0; i < EDID_SIZE - 1; ++i)
+               sum += edid[i];
+       edid[EDID_SIZE - 1] = (0x100 - (sum & 0xFF)) & 0xFF;
+       drm_connector_update_edid_property(connector, (struct edid *)edid);
+ }
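
The loop at the end of vbox_set_edid() implements the standard EDID rule: the 128 bytes of a block must sum to zero modulo 256, so the last byte is chosen to cancel the sum of the first 127. As a standalone helper plus a quick self-check:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define EDID_SIZE 128

static void edid_fixup_checksum(uint8_t *edid)
{
	unsigned int i, sum = 0;

	for (i = 0; i < EDID_SIZE - 1; i++)
		sum += edid[i];
	edid[EDID_SIZE - 1] = (0x100 - (sum & 0xff)) & 0xff;
}

int main(void)
{
	uint8_t edid[EDID_SIZE];
	unsigned int i, sum = 0;

	memset(edid, 0, sizeof(edid));
	memcpy(edid, "\x00\xff\xff\xff\xff\xff\xff\x00", 8);	/* EDID header */
	edid_fixup_checksum(edid);

	for (i = 0; i < EDID_SIZE; i++)
		sum += edid[i];
	printf("sum mod 256 = %u\n", sum & 0xff);	/* 0 */
	return 0;
}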
+ static int vbox_get_modes(struct drm_connector *connector)
+ {
+       struct vbox_connector *vbox_connector = NULL;
+       struct drm_display_mode *mode = NULL;
+       struct vbox_private *vbox = NULL;
+       unsigned int num_modes = 0;
+       int preferred_width, preferred_height;
+       vbox_connector = to_vbox_connector(connector);
+       vbox = connector->dev->dev_private;
+       hgsmi_report_flags_location(vbox->guest_pool, GUEST_HEAP_OFFSET(vbox) +
+                                   HOST_FLAGS_OFFSET);
+       if (vbox_connector->vbox_crtc->crtc_id == 0)
+               vbox_report_caps(vbox);
+       num_modes = drm_add_modes_noedid(connector, 2560, 1600);
+       preferred_width = vbox_connector->mode_hint.width ?
+                         vbox_connector->mode_hint.width : 1024;
+       preferred_height = vbox_connector->mode_hint.height ?
+                          vbox_connector->mode_hint.height : 768;
+       mode = drm_cvt_mode(connector->dev, preferred_width, preferred_height,
+                           60, false, false, false);
+       if (mode) {
+               mode->type |= DRM_MODE_TYPE_PREFERRED;
+               drm_mode_probed_add(connector, mode);
+               ++num_modes;
+       }
+       vbox_set_edid(connector, preferred_width, preferred_height);
+       if (vbox_connector->vbox_crtc->x_hint != -1)
+               drm_object_property_set_value(&connector->base,
+                       vbox->ddev.mode_config.suggested_x_property,
+                       vbox_connector->vbox_crtc->x_hint);
+       else
+               drm_object_property_set_value(&connector->base,
+                       vbox->ddev.mode_config.suggested_x_property, 0);
+       if (vbox_connector->vbox_crtc->y_hint != -1)
+               drm_object_property_set_value(&connector->base,
+                       vbox->ddev.mode_config.suggested_y_property,
+                       vbox_connector->vbox_crtc->y_hint);
+       else
+               drm_object_property_set_value(&connector->base,
+                       vbox->ddev.mode_config.suggested_y_property, 0);
+       return num_modes;
+ }
+ static void vbox_connector_destroy(struct drm_connector *connector)
+ {
+       drm_connector_unregister(connector);
+       drm_connector_cleanup(connector);
+       kfree(connector);
+ }
+ static enum drm_connector_status
+ vbox_connector_detect(struct drm_connector *connector, bool force)
+ {
+       struct vbox_connector *vbox_connector;
+       vbox_connector = to_vbox_connector(connector);
+       return vbox_connector->mode_hint.disconnected ?
+           connector_status_disconnected : connector_status_connected;
+ }
+ static int vbox_fill_modes(struct drm_connector *connector, u32 max_x,
+                          u32 max_y)
+ {
+       struct vbox_connector *vbox_connector;
+       struct drm_device *dev;
+       struct drm_display_mode *mode, *iterator;
+       vbox_connector = to_vbox_connector(connector);
+       dev = vbox_connector->base.dev;
+       list_for_each_entry_safe(mode, iterator, &connector->modes, head) {
+               list_del(&mode->head);
+               drm_mode_destroy(dev, mode);
+       }
+       return drm_helper_probe_single_connector_modes(connector, max_x, max_y);
+ }
+ static const struct drm_connector_helper_funcs vbox_connector_helper_funcs = {
+       .get_modes = vbox_get_modes,
+ };
+ static const struct drm_connector_funcs vbox_connector_funcs = {
+       .detect = vbox_connector_detect,
+       .fill_modes = vbox_fill_modes,
+       .destroy = vbox_connector_destroy,
+       .reset = drm_atomic_helper_connector_reset,
+       .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+       .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+ };
+ static int vbox_connector_init(struct drm_device *dev,
+                              struct vbox_crtc *vbox_crtc,
+                              struct drm_encoder *encoder)
+ {
+       struct vbox_connector *vbox_connector;
+       struct drm_connector *connector;
+       vbox_connector = kzalloc(sizeof(*vbox_connector), GFP_KERNEL);
+       if (!vbox_connector)
+               return -ENOMEM;
+       connector = &vbox_connector->base;
+       vbox_connector->vbox_crtc = vbox_crtc;
+       drm_connector_init(dev, connector, &vbox_connector_funcs,
+                          DRM_MODE_CONNECTOR_VGA);
+       drm_connector_helper_add(connector, &vbox_connector_helper_funcs);
+       connector->interlace_allowed = 0;
+       connector->doublescan_allowed = 0;
+       drm_mode_create_suggested_offset_properties(dev);
+       drm_object_attach_property(&connector->base,
+                                  dev->mode_config.suggested_x_property, 0);
+       drm_object_attach_property(&connector->base,
+                                  dev->mode_config.suggested_y_property, 0);
+       drm_connector_attach_encoder(connector, encoder);
+       return 0;
+ }
+ static struct drm_framebuffer *vbox_user_framebuffer_create(
+               struct drm_device *dev,
+               struct drm_file *filp,
+               const struct drm_mode_fb_cmd2 *mode_cmd)
+ {
+       struct vbox_private *vbox =
+               container_of(dev, struct vbox_private, ddev);
+       struct drm_gem_object *obj;
+       struct vbox_framebuffer *vbox_fb;
+       int ret = -ENOMEM;
+       obj = drm_gem_object_lookup(filp, mode_cmd->handles[0]);
+       if (!obj)
+               return ERR_PTR(-ENOENT);
+       vbox_fb = kzalloc(sizeof(*vbox_fb), GFP_KERNEL);
+       if (!vbox_fb)
+               goto err_unref_obj;
+       ret = vbox_framebuffer_init(vbox, vbox_fb, mode_cmd, obj);
+       if (ret)
+               goto err_free_vbox_fb;
+       return &vbox_fb->base;
+ err_free_vbox_fb:
+       kfree(vbox_fb);
+ err_unref_obj:
+       drm_gem_object_put_unlocked(obj);
+       return ERR_PTR(ret);
+ }
+ static const struct drm_mode_config_funcs vbox_mode_funcs = {
+       .fb_create = vbox_user_framebuffer_create,
+       .atomic_check = drm_atomic_helper_check,
+       .atomic_commit = drm_atomic_helper_commit,
+ };
+ int vbox_mode_init(struct vbox_private *vbox)
+ {
+       struct drm_device *dev = &vbox->ddev;
+       struct drm_encoder *encoder;
+       struct vbox_crtc *vbox_crtc;
+       unsigned int i;
+       int ret;
+       drm_mode_config_init(dev);
+       dev->mode_config.funcs = (void *)&vbox_mode_funcs;
+       dev->mode_config.min_width = 0;
+       dev->mode_config.min_height = 0;
+       dev->mode_config.preferred_depth = 24;
+       dev->mode_config.max_width = VBE_DISPI_MAX_XRES;
+       dev->mode_config.max_height = VBE_DISPI_MAX_YRES;
+       for (i = 0; i < vbox->num_crtcs; ++i) {
+               vbox_crtc = vbox_crtc_init(dev, i);
+               if (IS_ERR(vbox_crtc)) {
+                       ret = PTR_ERR(vbox_crtc);
+                       goto err_drm_mode_cleanup;
+               }
+               encoder = vbox_encoder_init(dev, i);
+               if (!encoder) {
+                       ret = -ENOMEM;
+                       goto err_drm_mode_cleanup;
+               }
+               ret = vbox_connector_init(dev, vbox_crtc, encoder);
+               if (ret)
+                       goto err_drm_mode_cleanup;
+       }
+       drm_mode_config_reset(dev);
+       return 0;
+ err_drm_mode_cleanup:
+       drm_mode_config_cleanup(dev);
+       return ret;
+ }
+ void vbox_mode_fini(struct vbox_private *vbox)
+ {
+       drm_mode_config_cleanup(&vbox->ddev);
+ }
index 0000000000000000000000000000000000000000,d61985b0c6eb9c4c57dc16e3b91912e6c351284b..702b1aa534946f79a037b878376437201109ac22
mode 000000,100644..100644
--- /dev/null
@@@ -1,0 -1,56 +1,56 @@@
 -      return -ENOSYS;
+ // SPDX-License-Identifier: MIT
+ /*
+  * Copyright (C) 2017 Oracle Corporation
+  * Copyright 2017 Canonical
+  * Authors: Andreas Pokorny
+  */
+ #include "vbox_drv.h"
+ /*
+  * Based on qxl_prime.c:
+  * Empty Implementations as there should not be any other driver for a virtual
+  * device that might share buffers with vboxvideo
+  */
+ int vbox_gem_prime_pin(struct drm_gem_object *obj)
+ {
+       WARN_ONCE(1, "not implemented");
 -      return ERR_PTR(-ENOSYS);
++      return -ENODEV;
+ }
+ void vbox_gem_prime_unpin(struct drm_gem_object *obj)
+ {
+       WARN_ONCE(1, "not implemented");
+ }
+ struct sg_table *vbox_gem_prime_get_sg_table(struct drm_gem_object *obj)
+ {
+       WARN_ONCE(1, "not implemented");
 -      return ERR_PTR(-ENOSYS);
++      return ERR_PTR(-ENODEV);
+ }
+ struct drm_gem_object *vbox_gem_prime_import_sg_table(
+       struct drm_device *dev, struct dma_buf_attachment *attach,
+       struct sg_table *table)
+ {
+       WARN_ONCE(1, "not implemented");
 -      return ERR_PTR(-ENOSYS);
++      return ERR_PTR(-ENODEV);
+ }
+ void *vbox_gem_prime_vmap(struct drm_gem_object *obj)
+ {
+       WARN_ONCE(1, "not implemented");
 -      return -ENOSYS;
++      return ERR_PTR(-ENODEV);
+ }
+ void vbox_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
+ {
+       WARN_ONCE(1, "not implemented");
+ }
+ int vbox_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *area)
+ {
+       WARN_ONCE(1, "not implemented");
++      return -ENODEV;
+ }
index 1baa10e9448472510006b7390e3e574841c0163a,b2891ca0e7f4341ec18126fc7e33f5c2247a80f6..5e09389e15149f8b762677d3ac017238672ede9e
@@@ -35,6 -35,7 +35,7 @@@
  #include <drm/drm_atomic.h>
  #include <drm/drm_atomic_helper.h>
  #include <drm/drm_atomic_uapi.h>
+ #include <drm/drm_print.h>
  #include <drm/drm_probe_helper.h>
  #include <linux/clk.h>
  #include <drm/drm_fb_cma_helper.h>
@@@ -67,67 -68,22 +68,22 @@@ to_vc4_crtc_state(struct drm_crtc_stat
  #define CRTC_WRITE(offset, val) writel(val, vc4_crtc->regs + (offset))
  #define CRTC_READ(offset) readl(vc4_crtc->regs + (offset))
  
- #define CRTC_REG(reg) { reg, #reg }
- static const struct {
-       u32 reg;
-       const char *name;
- } crtc_regs[] = {
-       CRTC_REG(PV_CONTROL),
-       CRTC_REG(PV_V_CONTROL),
-       CRTC_REG(PV_VSYNCD_EVEN),
-       CRTC_REG(PV_HORZA),
-       CRTC_REG(PV_HORZB),
-       CRTC_REG(PV_VERTA),
-       CRTC_REG(PV_VERTB),
-       CRTC_REG(PV_VERTA_EVEN),
-       CRTC_REG(PV_VERTB_EVEN),
-       CRTC_REG(PV_INTEN),
-       CRTC_REG(PV_INTSTAT),
-       CRTC_REG(PV_STAT),
-       CRTC_REG(PV_HACT_ACT),
+ static const struct debugfs_reg32 crtc_regs[] = {
+       VC4_REG32(PV_CONTROL),
+       VC4_REG32(PV_V_CONTROL),
+       VC4_REG32(PV_VSYNCD_EVEN),
+       VC4_REG32(PV_HORZA),
+       VC4_REG32(PV_HORZB),
+       VC4_REG32(PV_VERTA),
+       VC4_REG32(PV_VERTB),
+       VC4_REG32(PV_VERTA_EVEN),
+       VC4_REG32(PV_VERTB_EVEN),
+       VC4_REG32(PV_INTEN),
+       VC4_REG32(PV_INTSTAT),
+       VC4_REG32(PV_STAT),
+       VC4_REG32(PV_HACT_ACT),
  };
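
VC4_REG32() pairs a register offset with its name via macro stringification, so a single table feeds both the debugfs dump and drm_print_regset32(). The trick in isolation, with invented offsets and a fake readl() stand-in:

#include <stdint.h>
#include <stdio.h>

struct reg32 {
	uint32_t offset;
	const char *name;
};

/* Record the offset and its source-level name in one entry. */
#define REG32(reg) { reg, #reg }

#define PV_CONTROL	0x00
#define PV_V_CONTROL	0x04

static const struct reg32 regs[] = {
	REG32(PV_CONTROL),
	REG32(PV_V_CONTROL),
};

static uint32_t fake_read(uint32_t offset)
{
	return 0xdead0000 | offset;	/* stand-in for readl() */
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(regs) / sizeof(regs[0]); i++)
		printf("%s (0x%04x): 0x%08x\n", regs[i].name,
		       regs[i].offset, fake_read(regs[i].offset));
	return 0;
}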
  
- static void vc4_crtc_dump_regs(struct vc4_crtc *vc4_crtc)
- {
-       int i;
-       for (i = 0; i < ARRAY_SIZE(crtc_regs); i++) {
-               DRM_INFO("0x%04x (%s): 0x%08x\n",
-                        crtc_regs[i].reg, crtc_regs[i].name,
-                        CRTC_READ(crtc_regs[i].reg));
-       }
- }
- #ifdef CONFIG_DEBUG_FS
- int vc4_crtc_debugfs_regs(struct seq_file *m, void *unused)
- {
-       struct drm_info_node *node = (struct drm_info_node *)m->private;
-       struct drm_device *dev = node->minor->dev;
-       int crtc_index = (uintptr_t)node->info_ent->data;
-       struct drm_crtc *crtc;
-       struct vc4_crtc *vc4_crtc;
-       int i;
-       i = 0;
-       list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
-               if (i == crtc_index)
-                       break;
-               i++;
-       }
-       if (!crtc)
-               return 0;
-       vc4_crtc = to_vc4_crtc(crtc);
-       for (i = 0; i < ARRAY_SIZE(crtc_regs); i++) {
-               seq_printf(m, "%s (0x%04x): 0x%08x\n",
-                          crtc_regs[i].name, crtc_regs[i].reg,
-                          CRTC_READ(crtc_regs[i].reg));
-       }
-       return 0;
- }
- #endif
  bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
                             bool in_vblank_irq, int *vpos, int *hpos,
                             ktime_t *stime, ktime_t *etime,
@@@ -434,8 -390,10 +390,10 @@@ static void vc4_crtc_mode_set_nofb(stru
        bool debug_dump_regs = false;
  
        if (debug_dump_regs) {
-               DRM_INFO("CRTC %d regs before:\n", drm_crtc_index(crtc));
-               vc4_crtc_dump_regs(vc4_crtc);
+               struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev);
+               dev_info(&vc4_crtc->pdev->dev, "CRTC %d regs before:\n",
+                        drm_crtc_index(crtc));
+               drm_print_regset32(&p, &vc4_crtc->regset);
        }
  
        if (vc4_crtc->channel == 2) {
        vc4_crtc_lut_load(crtc);
  
        if (debug_dump_regs) {
-               DRM_INFO("CRTC %d regs after:\n", drm_crtc_index(crtc));
-               vc4_crtc_dump_regs(vc4_crtc);
+               struct drm_printer p = drm_info_printer(&vc4_crtc->pdev->dev);
+               dev_info(&vc4_crtc->pdev->dev, "CRTC %d regs after:\n",
+                        drm_crtc_index(crtc));
+               drm_print_regset32(&p, &vc4_crtc->regset);
        }
  }
  
@@@ -834,6 -794,14 +794,14 @@@ static void vc4_crtc_handle_page_flip(s
                drm_crtc_send_vblank_event(crtc, vc4_crtc->event);
                vc4_crtc->event = NULL;
                drm_crtc_vblank_put(crtc);
+               /* Wait for the page flip to unmask the underrun to ensure that
+                * the display list was updated by the hardware. Before that
+                * happens, the HVS will be using the previous display list with
+                * the CRTC and encoder already reconfigured, leading to
+                * underruns. This can be seen when reconfiguring the CRTC.
+                */
+               vc4_hvs_unmask_underrun(dev, vc4_crtc->channel);
        }
        spin_unlock_irqrestore(&dev->event_lock, flags);
  }
@@@ -1042,7 -1010,7 +1010,7 @@@ static voi
  vc4_crtc_reset(struct drm_crtc *crtc)
  {
        if (crtc->state)
 -              __drm_atomic_helper_crtc_destroy_state(crtc->state);
 +              vc4_crtc_destroy_state(crtc, crtc->state);
  
        crtc->state = kzalloc(sizeof(struct vc4_crtc_state), GFP_KERNEL);
        if (crtc->state)
@@@ -1075,6 -1043,7 +1043,7 @@@ static const struct drm_crtc_helper_fun
  
  static const struct vc4_crtc_data pv0_data = {
        .hvs_channel = 0,
+       .debugfs_name = "crtc0_regs",
        .encoder_types = {
                [PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI0,
                [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_DPI,
  
  static const struct vc4_crtc_data pv1_data = {
        .hvs_channel = 2,
+       .debugfs_name = "crtc1_regs",
        .encoder_types = {
                [PV_CONTROL_CLK_SELECT_DSI] = VC4_ENCODER_TYPE_DSI1,
                [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_SMI,
  
  static const struct vc4_crtc_data pv2_data = {
        .hvs_channel = 1,
+       .debugfs_name = "crtc2_regs",
        .encoder_types = {
                [PV_CONTROL_CLK_SELECT_DPI_SMI_HDMI] = VC4_ENCODER_TYPE_HDMI,
                [PV_CONTROL_CLK_SELECT_VEC] = VC4_ENCODER_TYPE_VEC,
@@@ -1169,11 -1140,16 +1140,16 @@@ static int vc4_crtc_bind(struct device 
        if (!match)
                return -ENODEV;
        vc4_crtc->data = match->data;
+       vc4_crtc->pdev = pdev;
  
        vc4_crtc->regs = vc4_ioremap_regs(pdev, 0);
        if (IS_ERR(vc4_crtc->regs))
                return PTR_ERR(vc4_crtc->regs);
  
+       vc4_crtc->regset.base = vc4_crtc->regs;
+       vc4_crtc->regset.regs = crtc_regs;
+       vc4_crtc->regset.nregs = ARRAY_SIZE(crtc_regs);
        /* For now, we create just the primary and the legacy cursor
         * planes.  We should be able to stack more planes on easily,
         * but to do that we would need to compute the bandwidth
  
        platform_set_drvdata(pdev, vc4_crtc);
  
+       vc4_debugfs_add_regset32(drm, vc4_crtc->data->debugfs_name,
+                                &vc4_crtc->regset);
        return 0;
  
  err_destroy_planes:
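
The crtc_regs table wired into the regset above is defined elsewhere in vc4_crtc.c. A sketch of what such a table looks like, assuming VC4_REG32() expands to { .name = #reg, .offset = reg } and using PV_* names from vc4_regs.h:

static const struct debugfs_reg32 crtc_regs[] = {
	VC4_REG32(PV_CONTROL),
	VC4_REG32(PV_V_CONTROL),
	VC4_REG32(PV_HORZA),
	VC4_REG32(PV_HORZB),
};

vc4_debugfs_add_regset32() then exposes the same table under debugfs (crtc0_regs and friends), so one register list serves both the debugfs file and the mode-set dumps.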
index af92964b6889dd0dbfaadac5558cb27ee78b3d56,7c2893181ba44efd41053f7ade90c0368bcce584..c50868753132a9bf3e64d00a8345e16703426715
@@@ -206,13 -206,9 +206,11 @@@ static struct drm_driver driver = 
        .debugfs_init = virtio_gpu_debugfs_init,
  #endif
        .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
 +      .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
        .gem_prime_export = drm_gem_prime_export,
        .gem_prime_import = drm_gem_prime_import,
-       .gem_prime_pin = virtgpu_gem_prime_pin,
-       .gem_prime_unpin = virtgpu_gem_prime_unpin,
        .gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table,
 +      .gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
        .gem_prime_vmap = virtgpu_gem_prime_vmap,
        .gem_prime_vunmap = virtgpu_gem_prime_vunmap,
        .gem_prime_mmap = virtgpu_gem_prime_mmap,
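
With prime_fd_to_handle wired up alongside handle_to_fd, virtio-gpu buffers can now travel in both directions across dma-buf. Userspace reaches both hooks through the standard PRIME ioctls; a minimal sketch using the drm.h UAPI, error handling trimmed:

#include <sys/ioctl.h>
#include <drm/drm.h>

/* Export a GEM handle as a dma-buf fd for another device or process. */
int export_bo(int drm_fd, unsigned int handle)
{
	struct drm_prime_handle args = {
		.handle = handle,
		.flags = DRM_CLOEXEC,
	};

	if (ioctl(drm_fd, DRM_IOCTL_PRIME_HANDLE_TO_FD, &args))
		return -1;
	return args.fd;
}

The import direction is symmetrical: DRM_IOCTL_PRIME_FD_TO_HANDLE fills args.handle from args.fd.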
index d577cb76f5ad6b66d26124284159c82706f44699,491dec0712b3098b35a9996ef27e81948f018580..b69ae10ca238da251534feba2ef15afc4bc55159
  #define DRIVER_MINOR 1
  #define DRIVER_PATCHLEVEL 0
  
+ struct virtio_gpu_object_params {
+       uint32_t format;
+       uint32_t width;
+       uint32_t height;
+       unsigned long size;
+       bool dumb;
+       /* 3d */
+       bool virgl;
+       uint32_t target;
+       uint32_t bind;
+       uint32_t depth;
+       uint32_t array_size;
+       uint32_t last_level;
+       uint32_t nr_samples;
+       uint32_t flags;
+ };
+
  struct virtio_gpu_object {
        struct drm_gem_object gem_base;
        uint32_t hw_res_handle;
@@@ -204,6 -221,9 +221,9 @@@ struct virtio_gpu_fpriv 
  /* virtio_ioctl.c */
  #define DRM_VIRTIO_NUM_IOCTLS 10
  extern struct drm_ioctl_desc virtio_gpu_ioctls[DRM_VIRTIO_NUM_IOCTLS];
+ int virtio_gpu_object_list_validate(struct ww_acquire_ctx *ticket,
+                                   struct list_head *head);
+ void virtio_gpu_unref_list(struct list_head *head);
  
  /* virtio_kms.c */
  int virtio_gpu_init(struct drm_device *dev);
@@@ -217,16 -237,17 +237,17 @@@ int virtio_gpu_gem_init(struct virtio_g
  void virtio_gpu_gem_fini(struct virtio_gpu_device *vgdev);
  int virtio_gpu_gem_create(struct drm_file *file,
                          struct drm_device *dev,
-                         uint64_t size,
+                         struct virtio_gpu_object_params *params,
                          struct drm_gem_object **obj_p,
                          uint32_t *handle_p);
  int virtio_gpu_gem_object_open(struct drm_gem_object *obj,
                               struct drm_file *file);
  void virtio_gpu_gem_object_close(struct drm_gem_object *obj,
                                 struct drm_file *file);
- struct virtio_gpu_object *virtio_gpu_alloc_object(struct drm_device *dev,
-                                                 size_t size, bool kernel,
-                                                 bool pinned);
+ struct virtio_gpu_object *
+ virtio_gpu_alloc_object(struct drm_device *dev,
+                       struct virtio_gpu_object_params *params,
+                       struct virtio_gpu_fence *fence);
  int virtio_gpu_mode_dumb_create(struct drm_file *file_priv,
                                struct drm_device *dev,
                                struct drm_mode_create_dumb *args);
@@@ -243,9 -264,8 +264,8 @@@ int virtio_gpu_alloc_vbufs(struct virti
  void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev);
  void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
                                    struct virtio_gpu_object *bo,
-                                   uint32_t format,
-                                   uint32_t width,
-                                   uint32_t height);
+                                   struct virtio_gpu_object_params *params,
+                                   struct virtio_gpu_fence *fence);
  void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
                                   uint32_t resource_id);
  void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
@@@ -304,7 -324,8 +324,8 @@@ void virtio_gpu_cmd_transfer_to_host_3d
  void
  virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
                                  struct virtio_gpu_object *bo,
-                                 struct virtio_gpu_resource_create_3d *rc_3d);
+                                 struct virtio_gpu_object_params *params,
+                                 struct virtio_gpu_fence *fence);
  void virtio_gpu_ctrl_ack(struct virtqueue *vq);
  void virtio_gpu_cursor_ack(struct virtqueue *vq);
  void virtio_gpu_fence_ack(struct virtqueue *vq);
@@@ -332,6 -353,7 +353,7 @@@ void virtio_gpu_ttm_fini(struct virtio_
  int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma);
  
  /* virtio_gpu_fence.c */
+ bool virtio_fence_signaled(struct dma_fence *f);
  struct virtio_gpu_fence *virtio_gpu_fence_alloc(
        struct virtio_gpu_device *vgdev);
  int virtio_gpu_fence_emit(struct virtio_gpu_device *vgdev,
@@@ -342,8 -364,9 +364,9 @@@ void virtio_gpu_fence_event_process(str
  
  /* virtio_gpu_object */
  int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
-                            unsigned long size, bool kernel, bool pinned,
-                            struct virtio_gpu_object **bo_ptr);
+                            struct virtio_gpu_object_params *params,
+                            struct virtio_gpu_object **bo_ptr,
+                            struct virtio_gpu_fence *fence);
  void virtio_gpu_object_kunmap(struct virtio_gpu_object *bo);
  int virtio_gpu_object_kmap(struct virtio_gpu_object *bo);
  int virtio_gpu_object_get_sg_table(struct virtio_gpu_device *qdev,
@@@ -352,12 -375,7 +375,10 @@@ void virtio_gpu_object_free_sg_table(st
  int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait);
  
  /* virtgpu_prime.c */
- int virtgpu_gem_prime_pin(struct drm_gem_object *obj);
- void virtgpu_gem_prime_unpin(struct drm_gem_object *obj);
  struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
 +struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
 +      struct drm_device *dev, struct dma_buf_attachment *attach,
 +      struct sg_table *sgt);
  void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj);
  void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
  int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
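
The params struct gathers what used to be loose size/format arguments. A sketch of how a 2D dumb buffer would fill it, modeled on the declarations above; the format constant and the helper name are assumptions for illustration:

static int demo_dumb_create(struct drm_file *file, struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
{
	struct drm_gem_object *gobj;
	struct virtio_gpu_object_params params = {
		.format = VIRTIO_GPU_FORMAT_B8G8R8X8_UNORM,
		.width  = args->width,
		.height = args->height,
		.size   = args->size,
		.dumb   = true,	/* plain 2D scanout, 3D fields left zero */
	};

	return virtio_gpu_gem_create(file, dev, &params, &gobj, &args->handle);
}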
index eb51a78e11991c01cce73d34cf74907cf9202764,22ef151410e078c1dfa325716a975e1145f27a4f..8fbf71bd0c5ebd3dcbebd99ecd5bdd774dbb6501
   * device that might share buffers with virtgpu
   */
  
- int virtgpu_gem_prime_pin(struct drm_gem_object *obj)
+ struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
  {
-       WARN_ONCE(1, "not implemented");
-       return -ENODEV;
- }
+       struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
  
- void virtgpu_gem_prime_unpin(struct drm_gem_object *obj)
- {
-       WARN_ONCE(1, "not implemented");
- }
+       if (!bo->tbo.ttm->pages || !bo->tbo.ttm->num_pages)
+               /* should not happen */
+               return ERR_PTR(-EINVAL);
  
- struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
- {
-       return ERR_PTR(-ENODEV);
+       return drm_prime_pages_to_sg(bo->tbo.ttm->pages,
+                                    bo->tbo.ttm->num_pages);
  }
  
 +struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
 +      struct drm_device *dev, struct dma_buf_attachment *attach,
 +      struct sg_table *table)
 +{
 +      return ERR_PTR(-ENODEV);
 +}
 +
  void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj)
  {
        struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
@@@ -68,7 -57,10 +64,10 @@@ void virtgpu_gem_prime_vunmap(struct dr
  }
  
  int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
-                      struct vm_area_struct *area)
+                          struct vm_area_struct *vma)
  {
-       return -ENODEV;
+       struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
+
+       bo->gem_base.vma_node.vm_node.start = bo->tbo.vma_node.vm_node.start;
+       return drm_gem_prime_mmap(obj, vma);
  }
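
On the consuming side, the sg_table built by drm_prime_pages_to_sg() above reaches the importer through the usual dma-buf attach/map sequence. A generic sketch, not virtio-specific:

#include <linux/dma-buf.h>

static struct sg_table *demo_import(struct dma_buf *dmabuf, struct device *dev)
{
	struct dma_buf_attachment *attach;
	struct sg_table *sgt;

	attach = dma_buf_attach(dmabuf, dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	/* DMA-maps the exporter's pages for "dev"; undone later by
	 * dma_buf_unmap_attachment() + dma_buf_detach(). */
	sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
	if (IS_ERR(sgt))
		dma_buf_detach(dmabuf, attach);

	return sgt;
}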
index 1bfa353d995cf5bb7ca4c4d1ce8a46ba686e55d6,be25ce9440adee242f1f913f42b18858fd3dca91..bf6c3500d363db7a99983a85833d9f05d46502e5
@@@ -545,14 -545,30 +545,14 @@@ static void vmw_get_initial_size(struc
        dev_priv->initial_height = height;
  }
  
 -/**
 - * vmw_assume_iommu - Figure out whether coherent dma-remapping might be
 - * taking place.
 - * @dev: Pointer to the struct drm_device.
 - *
 - * Return: true if iommu present, false otherwise.
 - */
 -static bool vmw_assume_iommu(struct drm_device *dev)
 -{
 -      const struct dma_map_ops *ops = get_dma_ops(dev->dev);
 -
 -      return !dma_is_direct(ops) && ops &&
 -              ops->map_page != dma_direct_map_page;
 -}
 -
  /**
   * vmw_dma_select_mode - Determine how DMA mappings should be set up for this
   * system.
   *
   * @dev_priv: Pointer to a struct vmw_private
   *
 - * This functions tries to determine the IOMMU setup and what actions
 - * need to be taken by the driver to make system pages visible to the
 - * device.
 + * This function tries to determine what actions need to be taken by the
 + * driver to make system pages visible to the device.
   * If this function decides that DMA is not possible, it returns -EINVAL.
   * The driver may then try to disable features of the device that require
   * DMA.
@@@ -562,16 -578,23 +562,16 @@@ static int vmw_dma_select_mode(struct v
        static const char *names[vmw_dma_map_max] = {
                [vmw_dma_phys] = "Using physical TTM page addresses.",
                [vmw_dma_alloc_coherent] = "Using coherent TTM pages.",
 -              [vmw_dma_map_populate] = "Keeping DMA mappings.",
 +              [vmw_dma_map_populate] = "Caching DMA mappings.",
                [vmw_dma_map_bind] = "Giving up DMA mappings early."};
  
        if (vmw_force_coherent)
                dev_priv->map_mode = vmw_dma_alloc_coherent;
 -      else if (vmw_assume_iommu(dev_priv->dev))
 -              dev_priv->map_mode = vmw_dma_map_populate;
 -      else if (!vmw_force_iommu)
 -              dev_priv->map_mode = vmw_dma_phys;
 -      else if (IS_ENABLED(CONFIG_SWIOTLB) && swiotlb_nr_tbl())
 -              dev_priv->map_mode = vmw_dma_alloc_coherent;
 +      else if (vmw_restrict_iommu)
 +              dev_priv->map_mode = vmw_dma_map_bind;
        else
                dev_priv->map_mode = vmw_dma_map_populate;
  
 -      if (dev_priv->map_mode == vmw_dma_map_populate && vmw_restrict_iommu)
 -              dev_priv->map_mode = vmw_dma_map_bind;
 -
        /* No TTM coherent page pool? FIXME: Ask TTM instead! */
          if (!(IS_ENABLED(CONFIG_SWIOTLB) || IS_ENABLED(CONFIG_INTEL_IOMMU)) &&
            (dev_priv->map_mode == vmw_dma_alloc_coherent))
@@@ -828,7 -851,6 +828,6 @@@ static int vmw_driver_load(struct drm_d
        ret = ttm_bo_device_init(&dev_priv->bdev,
                                 &vmw_bo_driver,
                                 dev->anon_inode->i_mapping,
-                                VMWGFX_FILE_PAGE_OFFSET,
                                 false);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed initializing TTM buffer object driver.\n");
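
vmw_force_coherent and vmw_restrict_iommu are module parameters, so the simplified map-mode ladder above remains user-steerable. A sketch of the plumbing; the exact MODULE_PARM_DESC wording is assumed:

#include <linux/module.h>

static int vmw_force_coherent;
static int vmw_restrict_iommu;

module_param_named(force_coherent, vmw_force_coherent, int, 0600);
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages for all DMA");
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
MODULE_PARM_DESC(restrict_iommu, "Give up DMA mappings early (map_bind)");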
diff --combined drivers/staging/Kconfig
index 86001a9f67e0ba6f02250ede25af7c92ab976f79,e166969033a25d5b1ea65e3ff1cfd0b90c3da0c4..d5f771fafc2172291242aeb4eff25c8b28eb7593
@@@ -1,6 -1,6 +1,6 @@@
 +# SPDX-License-Identifier: GPL-2.0
  menuconfig STAGING
        bool "Staging drivers"
 -      default n
        ---help---
          This option allows you to select a number of drivers that are
          not of the "normal" Linux kernel quality level.  These drivers
@@@ -16,8 -16,8 +16,8 @@@
  
          If you wish to work on these drivers, to help improve them, or
          to report problems you have with them, please see the
 -        driver_name.README file in the drivers/staging/ directory to
 -        see what needs to be worked on, and who to contact.
 +        drivers/staging/<driver_name>/TODO file to see what needs to be
 +        worked on, and who to contact.
  
          If in doubt, say N here.
  
@@@ -40,6 -40,8 +40,6 @@@ source "drivers/staging/rtl8712/Kconfig
  
  source "drivers/staging/rtl8188eu/Kconfig"
  
 -source "drivers/staging/rtlwifi/Kconfig"
 -
  source "drivers/staging/rts5208/Kconfig"
  
  source "drivers/staging/octeon/Kconfig"
@@@ -94,8 -96,6 +94,6 @@@ source "drivers/staging/greybus/Kconfig
  
  source "drivers/staging/vc04_services/Kconfig"
  
- source "drivers/staging/vboxvideo/Kconfig"
-
  source "drivers/staging/pi433/Kconfig"
  
  source "drivers/staging/mt7621-pci/Kconfig"
@@@ -104,10 -104,14 +102,10 @@@ source "drivers/staging/mt7621-pci-phy/
  
  source "drivers/staging/mt7621-pinctrl/Kconfig"
  
 -source "drivers/staging/mt7621-spi/Kconfig"
 -
  source "drivers/staging/mt7621-dma/Kconfig"
  
  source "drivers/staging/ralink-gdma/Kconfig"
  
 -source "drivers/staging/mt7621-mmc/Kconfig"
 -
  source "drivers/staging/mt7621-dts/Kconfig"
  
  source "drivers/staging/gasket/Kconfig"
@@@ -116,8 -120,4 +114,8 @@@ source "drivers/staging/axis-fifo/Kconf
  
  source "drivers/staging/erofs/Kconfig"
  
 +source "drivers/staging/fieldbus/Kconfig"
 +
 +source "drivers/staging/kpc2000/Kconfig"
 +
  endif # STAGING
diff --combined drivers/staging/Makefile
index dc3da72b3ff9b1d7af07531af40c7420968721cf,afa9dd4d3641a8be22b28f0be930ddfb8cc02fd8..0da0d3f0b5e4bec143513d824749eed9983e4a3a
@@@ -10,6 -10,7 +10,6 @@@ obj-$(CONFIG_RTL8192E)                += rtl8192e
  obj-$(CONFIG_RTL8723BS)               += rtl8723bs/
  obj-$(CONFIG_R8712U)          += rtl8712/
  obj-$(CONFIG_R8188EU)         += rtl8188eu/
 -obj-$(CONFIG_R8822BE)         += rtlwifi/
  obj-$(CONFIG_RTS5208)         += rts5208/
  obj-$(CONFIG_NETLOGIC_XLR_NET)        += netlogic/
  obj-$(CONFIG_OCTEON_ETHERNET) += octeon/
@@@ -37,16 -38,15 +37,15 @@@ obj-$(CONFIG_MOST)         += most
  obj-$(CONFIG_KS7010)          += ks7010/
  obj-$(CONFIG_GREYBUS)         += greybus/
  obj-$(CONFIG_BCM2835_VCHIQ)   += vc04_services/
- obj-$(CONFIG_DRM_VBOXVIDEO)   += vboxvideo/
  obj-$(CONFIG_PI433)           += pi433/
  obj-$(CONFIG_PCI_MT7621)      += mt7621-pci/
  obj-$(CONFIG_PCI_MT7621_PHY)  += mt7621-pci-phy/
  obj-$(CONFIG_PINCTRL_RT2880)  += mt7621-pinctrl/
 -obj-$(CONFIG_SPI_MT7621)      += mt7621-spi/
  obj-$(CONFIG_SOC_MT7621)      += mt7621-dma/
  obj-$(CONFIG_DMA_RALINK)      += ralink-gdma/
 -obj-$(CONFIG_MTK_MMC)         += mt7621-mmc/
  obj-$(CONFIG_SOC_MT7621)      += mt7621-dts/
  obj-$(CONFIG_STAGING_GASKET_FRAMEWORK)        += gasket/
  obj-$(CONFIG_XIL_AXIS_FIFO)   += axis-fifo/
  obj-$(CONFIG_EROFS_FS)                += erofs/
 +obj-$(CONFIG_FIELDBUS_DEV)     += fieldbus/
 +obj-$(CONFIG_KPC2000)         += kpc2000/
index c4da82dd15c72ca05c891237779556de434e5b52,67ce2037472d8a39fe37c57974a41f66f752dd7e..bdac3e7d7b1841fdf7dadd20ed01c56e3ce6962c
  
  struct dwc3_of_simple {
        struct device           *dev;
 -      struct clk              **clks;
 +      struct clk_bulk_data    *clks;
        int                     num_clocks;
        struct reset_control    *resets;
        bool                    pulse_resets;
        bool                    need_reset;
  };
  
 -static int dwc3_of_simple_clk_init(struct dwc3_of_simple *simple, int count)
 -{
 -      struct device           *dev = simple->dev;
 -      struct device_node      *np = dev->of_node;
 -      int                     i;
 -
 -      simple->num_clocks = count;
 -
 -      if (!count)
 -              return 0;
 -
 -      simple->clks = devm_kcalloc(dev, simple->num_clocks,
 -                      sizeof(struct clk *), GFP_KERNEL);
 -      if (!simple->clks)
 -              return -ENOMEM;
 -
 -      for (i = 0; i < simple->num_clocks; i++) {
 -              struct clk      *clk;
 -              int             ret;
 -
 -              clk = of_clk_get(np, i);
 -              if (IS_ERR(clk)) {
 -                      while (--i >= 0) {
 -                              clk_disable_unprepare(simple->clks[i]);
 -                              clk_put(simple->clks[i]);
 -                      }
 -                      return PTR_ERR(clk);
 -              }
 -
 -              ret = clk_prepare_enable(clk);
 -              if (ret < 0) {
 -                      while (--i >= 0) {
 -                              clk_disable_unprepare(simple->clks[i]);
 -                              clk_put(simple->clks[i]);
 -                      }
 -                      clk_put(clk);
 -
 -                      return ret;
 -              }
 -
 -              simple->clks[i] = clk;
 -      }
 -
 -      return 0;
 -}
 -
  static int dwc3_of_simple_probe(struct platform_device *pdev)
  {
        struct dwc3_of_simple   *simple;
@@@ -38,6 -84,7 +38,6 @@@
        struct device_node      *np = dev->of_node;
  
        int                     ret;
 -      int                     i;
        bool                    shared_resets = false;
  
        simple = devm_kzalloc(dev, sizeof(*simple), GFP_KERNEL);
                simple->pulse_resets = true;
        }
  
-       simple->resets = of_reset_control_array_get(np, shared_resets, true);
+       simple->resets = of_reset_control_array_get(np, shared_resets, true,
+                                                   true);
        if (IS_ERR(simple->resets)) {
                ret = PTR_ERR(simple->resets);
                dev_err(dev, "failed to get device resets, err=%d\n", ret);
                        goto err_resetc_put;
        }
  
 -      ret = dwc3_of_simple_clk_init(simple, of_count_phandle_with_args(np,
 -                                              "clocks", "#clock-cells"));
 +      ret = clk_bulk_get_all(simple->dev, &simple->clks);
 +      if (ret < 0)
 +              goto err_resetc_assert;
 +
 +      simple->num_clocks = ret;
 +      ret = clk_bulk_prepare_enable(simple->num_clocks, simple->clks);
        if (ret)
                goto err_resetc_assert;
  
        ret = of_platform_populate(np, NULL, NULL, dev);
 -      if (ret) {
 -              for (i = 0; i < simple->num_clocks; i++) {
 -                      clk_disable_unprepare(simple->clks[i]);
 -                      clk_put(simple->clks[i]);
 -              }
 -
 -              goto err_resetc_assert;
 -      }
 +      if (ret)
 +              goto err_clk_put;
  
        pm_runtime_set_active(dev);
        pm_runtime_enable(dev);
  
        return 0;
  
 +err_clk_put:
 +      clk_bulk_disable_unprepare(simple->num_clocks, simple->clks);
 +      clk_bulk_put_all(simple->num_clocks, simple->clks);
 +
  err_resetc_assert:
        if (!simple->pulse_resets)
                reset_control_assert(simple->resets);
@@@ -113,11 -159,14 +114,11 @@@ static int dwc3_of_simple_remove(struc
  {
        struct dwc3_of_simple   *simple = platform_get_drvdata(pdev);
        struct device           *dev = &pdev->dev;
 -      int                     i;
  
        of_platform_depopulate(dev);
  
 -      for (i = 0; i < simple->num_clocks; i++) {
 -              clk_disable_unprepare(simple->clks[i]);
 -              clk_put(simple->clks[i]);
 -      }
 +      clk_bulk_disable_unprepare(simple->num_clocks, simple->clks);
 +      clk_bulk_put_all(simple->num_clocks, simple->clks);
        simple->num_clocks = 0;
  
        if (!simple->pulse_resets)
  static int __maybe_unused dwc3_of_simple_runtime_suspend(struct device *dev)
  {
        struct dwc3_of_simple   *simple = dev_get_drvdata(dev);
 -      int                     i;
  
 -      for (i = 0; i < simple->num_clocks; i++)
 -              clk_disable(simple->clks[i]);
 +      clk_bulk_disable(simple->num_clocks, simple->clks);
  
        return 0;
  }
  static int __maybe_unused dwc3_of_simple_runtime_resume(struct device *dev)
  {
        struct dwc3_of_simple   *simple = dev_get_drvdata(dev);
 -      int                     ret;
 -      int                     i;
 -
 -      for (i = 0; i < simple->num_clocks; i++) {
 -              ret = clk_enable(simple->clks[i]);
 -              if (ret < 0) {
 -                      while (--i >= 0)
 -                              clk_disable(simple->clks[i]);
 -                      return ret;
 -              }
 -      }
  
 -      return 0;
 +      return clk_bulk_enable(simple->num_clocks, simple->clks);
  }
  
  static int __maybe_unused dwc3_of_simple_suspend(struct device *dev)
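
The conversion above is the standard clk_bulk idiom: one call fetches every clock listed in the device's DT "clocks" property, and the prepare/enable, disable/unprepare, and put steps each collapse into a single bulk call. A minimal sketch of the acquire path:

#include <linux/clk.h>

static int demo_clocks_on(struct device *dev,
			  struct clk_bulk_data **clks, int *num_clocks)
{
	int ret;

	ret = clk_bulk_get_all(dev, clks);	/* returns the clock count */
	if (ret < 0)
		return ret;
	*num_clocks = ret;

	ret = clk_bulk_prepare_enable(*num_clocks, *clks);
	if (ret)
		clk_bulk_put_all(*num_clocks, *clks);
	return ret;
}

Teardown mirrors it: clk_bulk_disable_unprepare() then clk_bulk_put_all(), exactly as the new remove path does.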
index 668ad971cd7b26828e2d95a4813ec3888d0abdac,61b80ec78e80da4f7a4a8a43a7f50fb86961e8e7..129dabbc002d4d5ceb0ebb5d0884ccf884765661
@@@ -420,6 -420,7 +420,6 @@@ extern struct ttm_bo_global 
        /**
         * Protected by ttm_global_mutex.
         */
 -      unsigned int use_count;
        struct list_head device_list;
  
        /**
@@@ -596,7 -597,7 +596,7 @@@ int ttm_bo_device_release(struct ttm_bo
  int ttm_bo_device_init(struct ttm_bo_device *bdev,
                       struct ttm_bo_driver *driver,
                       struct address_space *mapping,
-                      uint64_t file_page_offset, bool need_dma32);
+                      bool need_dma32);
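
With the fixed DRM_FILE_PAGE_OFFSET now applied inside TTM (see the vmwgfx hunk above), callers simply drop the offset argument. A sketch of the updated call, with placeholder driver names:

static int demo_ttm_init(struct drm_device *dev, struct ttm_bo_device *bdev,
			 struct ttm_bo_driver *driver)
{
	/* The mmap offset base is handled by TTM itself now; only
	 * need_dma32 remains as a policy knob. */
	return ttm_bo_device_init(bdev, driver,
				  dev->anon_inode->i_mapping,
				  false);
}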
  
  /**
   * ttm_bo_unmap_virtual