Merge branch 'akpm' (patches from Andrew)
author    Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 24 May 2016 02:42:28 +0000 (19:42 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 24 May 2016 02:42:28 +0000 (19:42 -0700)
Merge yet more updates from Andrew Morton:

 - Oleg's "wait/ptrace: assume __WALL if the child is traced".  It's a
   kernel-based workaround for existing userspace issues (a sketch of the
   affected userspace pattern follows this list).

 - A few hotfixes

 - befs cleanups

 - nilfs2 updates

 - sys_wait() changes

 - kexec updates

 - kdump

 - scripts/gdb updates

 - the last of the MM queue (mostly the mmap_sem-killable conversions; a
   sketch of the recurring pattern follows the shortlog)

 - a few other misc things

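A minimal, hypothetical illustration of the class of userspace the __WALL
change accommodates (this is not the reproducer from Oleg's patch): a tracer
attaches to a thread whose exit signal is not SIGCHLD and then waits for it
without passing __WALL.  Before the change such a wait could block or fail
with ECHILD; with it, the kernel assumes __WALL because the child is being
traced by the caller.

	#include <stdio.h>
	#include <sys/ptrace.h>
	#include <sys/types.h>
	#include <sys/wait.h>

	/* Hypothetical helper: attach to an existing thread and wait for the
	 * attach-stop.  Older kernels typically required __WALL (or __WCLONE)
	 * in the waitpid() flags for a non-SIGCHLD child. */
	static int attach_and_wait(pid_t tid)
	{
		int status;

		if (ptrace(PTRACE_ATTACH, tid, NULL, NULL) == -1) {
			perror("PTRACE_ATTACH");
			return -1;
		}
		if (waitpid(tid, &status, 0) == -1) {
			perror("waitpid");
			return -1;
		}
		printf("tid %d stopped, status %#x\n", (int)tid, status);
		return 0;
	}
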
* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (84 commits)
  kgdb: depends on VT
  drm/amdgpu: make amdgpu_mn_get wait for mmap_sem killable
  drm/radeon: make radeon_mn_get wait for mmap_sem killable
  drm/i915: make i915_gem_mmap_ioctl wait for mmap_sem killable
  uprobes: wait for mmap_sem for write killable
  prctl: make PR_SET_THP_DISABLE wait for mmap_sem killable
  exec: make exec path waiting for mmap_sem killable
  aio: make aio_setup_ring killable
  coredump: make coredump_wait wait for mmap_sem for write killable
  vdso: make arch_setup_additional_pages wait for mmap_sem for write killable
  ipc, shm: make shmem attach/detach wait for mmap_sem killable
  mm, fork: make dup_mmap wait for mmap_sem for write killable
  mm, proc: make clear_refs killable
  mm: make vm_brk killable
  mm, elf: handle vm_brk error
  mm, aout: handle vm_brk failures
  mm: make vm_munmap killable
  mm: make vm_mmap killable
  mm: make mmap_sem for write waits killable for mm syscalls
  MAINTAINERS: add co-maintainer for scripts/gdb
  ...

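Most of the MM queue above converts paths that take mmap_sem for write to the
killable variant, so that a task with a fatal signal pending (for instance one
selected by the OOM killer) can back out instead of blocking on the lock.  A
minimal sketch of the recurring pattern, using a hypothetical helper rather
than any function from the series (the i915_gem_mmap_ioctl hunk further down
shows one of the real conversions):

	#include <linux/mm_types.h>
	#include <linux/rwsem.h>

	/* Hypothetical helper, not from the series: down_write() on mmap_sem
	 * becomes down_write_killable(), and -EINTR is propagated so callers
	 * (ultimately the syscall or ioctl path) can return to userspace. */
	static int frob_address_space(struct mm_struct *mm)
	{
		if (down_write_killable(&mm->mmap_sem))
			return -EINTR;

		/* ... modify the address space under the write lock ... */

		up_write(&mm->mmap_sem);
		return 0;
	}
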
MAINTAINERS
arch/tile/configs/tilegx_defconfig
arch/tile/configs/tilepro_defconfig
drivers/gpu/drm/i915/i915_gem.c

diff --combined MAINTAINERS
index f27ff1b3275721bad423257857661486bb4c6b2c,b9e63233511c8c05935e3812279e5ea2b7dafff2..33020068822807625aa8873116267df7d7138036
@@@ -856,12 -856,6 +856,12 @@@ S:       Maintaine
  F:    drivers/net/arcnet/
  F:    include/uapi/linux/if_arcnet.h
  
 +ARC PGU DRM DRIVER
 +M:    Alexey Brodkin <abrodkin@synopsys.com>
 +S:    Supported
 +F:    drivers/gpu/drm/arc/
 +F:    Documentation/devicetree/bindings/display/snps,arcpgu.txt
 +
  ARM HDLCD DRM DRIVER
  M:    Liviu Dudau <liviu.dudau@arm.com>
  S:    Supported
@@@ -1951,16 -1945,6 +1951,16 @@@ L:    platform-driver-x86@vger.kernel.or
  S:    Maintained
  F:    drivers/platform/x86/asus-wireless.c
  
 +ASYMMETRIC KEYS
 +M:    David Howells <dhowells@redhat.com>
 +L:    keyrings@vger.kernel.org
 +S:    Maintained
 +F:    Documentation/crypto/asymmetric-keys.txt
 +F:    include/linux/verification.h
 +F:    include/crypto/public_key.h
 +F:    include/crypto/pkcs7.h
 +F:    crypto/asymmetric_keys/
 +
  ASYNCHRONOUS TRANSFERS/TRANSFORMS (IOAT) API
  R:    Dan Williams <dan.j.williams@intel.com>
  W:    http://sourceforge.net/projects/xscaleiop
@@@ -3853,25 -3837,9 +3853,25 @@@ T:    git git://people.freedesktop.org/~ai
  S:    Maintained
  F:    drivers/gpu/drm/
  F:    drivers/gpu/vga/
 +F:    Documentation/DocBook/gpu.*
  F:    include/drm/
  F:    include/uapi/drm/
  
 +DRM DRIVER FOR AST SERVER GRAPHICS CHIPS
 +M:    Dave Airlie <airlied@redhat.com>
 +S:    Odd Fixes
 +F:    drivers/gpu/drm/ast/
 +
 +DRM DRIVER FOR BOCHS VIRTUAL GPU
 +M:    Gerd Hoffmann <kraxel@redhat.com>
 +S:    Odd Fixes
 +F:    drivers/gpu/drm/bochs/
 +
 +DRM DRIVER FOR QEMU'S CIRRUS DEVICE
 +M:    Dave Airlie <airlied@redhat.com>
 +S:    Odd Fixes
 +F:    drivers/gpu/drm/cirrus/
 +
  RADEON and AMDGPU DRM DRIVERS
  M:    Alex Deucher <alexander.deucher@amd.com>
  M:    Christian König <christian.koenig@amd.com>
@@@ -3879,9 -3847,9 +3879,9 @@@ L:      dri-devel@lists.freedesktop.or
  T:    git git://people.freedesktop.org/~agd5f/linux
  S:    Supported
  F:    drivers/gpu/drm/radeon/
 -F:    include/uapi/drm/radeon*
 +F:    include/uapi/drm/radeon_drm.h
  F:    drivers/gpu/drm/amd/
 -F:    include/uapi/drm/amdgpu*
 +F:    include/uapi/drm/amdgpu_drm.h
  
  DRM PANEL DRIVERS
  M:    Thierry Reding <thierry.reding@gmail.com>
@@@ -3904,7 -3872,7 +3904,7 @@@ T:      git git://anongit.freedesktop.org/dr
  S:    Supported
  F:    drivers/gpu/drm/i915/
  F:    include/drm/i915*
 -F:    include/uapi/drm/i915*
 +F:    include/uapi/drm/i915_drm.h
  
  DRM DRIVERS FOR ATMEL HLCDC
  M:    Boris Brezillon <boris.brezillon@free-electrons.com>
@@@ -3913,13 -3881,6 +3913,13 @@@ S:    Supporte
  F:    drivers/gpu/drm/atmel-hlcdc/
  F:    Documentation/devicetree/bindings/drm/atmel/
  
 +DRM DRIVERS FOR ALLWINNER A10
 +M:    Maxime Ripard  <maxime.ripard@free-electrons.com>
 +L:    dri-devel@lists.freedesktop.org
 +S:    Supported
 +F:    drivers/gpu/drm/sun4i/
 +F:    Documentation/devicetree/bindings/display/sunxi/sun4i-drm.txt
 +
  DRM DRIVERS FOR EXYNOS
  M:    Inki Dae <inki.dae@samsung.com>
  M:    Joonyoung Shim <jy0922.shim@samsung.com>
@@@ -3929,8 -3890,8 +3929,8 @@@ L:      dri-devel@lists.freedesktop.or
  T:    git git://git.kernel.org/pub/scm/linux/kernel/git/daeinki/drm-exynos.git
  S:    Supported
  F:    drivers/gpu/drm/exynos/
 -F:    include/drm/exynos*
 -F:    include/uapi/drm/exynos*
 +F:    include/uapi/drm/exynos_drm.h
 +F:    Documentation/devicetree/bindings/display/exynos/
  
  DRM DRIVERS FOR FREESCALE DCU
  M:    Stefan Agner <stefan@agner.ch>
@@@ -3939,7 -3900,6 +3939,7 @@@ L:      dri-devel@lists.freedesktop.or
  S:    Supported
  F:    drivers/gpu/drm/fsl-dcu/
  F:    Documentation/devicetree/bindings/display/fsl,dcu.txt
 +F:    Documentation/devicetree/bindings/display/fsl,tcon.txt
  F:    Documentation/devicetree/bindings/display/panel/nec,nl4827hc19_05b.txt
  
  DRM DRIVERS FOR FREESCALE IMX
@@@ -3955,45 -3915,12 +3955,45 @@@ M:   Patrik Jakobsson <patrik.r.jakobsson
  L:    dri-devel@lists.freedesktop.org
  T:    git git://github.com/patjak/drm-gma500
  S:    Maintained
 -F:    drivers/gpu/drm/gma500
 -F:    include/drm/gma500*
 +F:    drivers/gpu/drm/gma500/
 +
 +DRM DRIVERS FOR HISILICON
 +M:    Xinliang Liu <z.liuxinliang@hisilicon.com>
 +R:    Xinwei Kong <kong.kongxinwei@hisilicon.com>
 +R:    Chen Feng <puck.chen@hisilicon.com>
 +L:    dri-devel@lists.freedesktop.org
 +T:    git git://github.com/xin3liang/linux.git
 +S:    Maintained
 +F:    drivers/gpu/drm/hisilicon/
 +F:    Documentation/devicetree/bindings/display/hisilicon/
 +
 +DRM DRIVER FOR INTEL I810 VIDEO CARDS
 +S:    Orphan / Obsolete
 +F:    drivers/gpu/drm/i810/
 +F:    include/uapi/drm/i810_drm.h
 +
 +DRM DRIVER FOR MSM ADRENO GPU
 +M:    Rob Clark <robdclark@gmail.com>
 +L:    linux-arm-msm@vger.kernel.org
 +L:    dri-devel@lists.freedesktop.org
 +L:    freedreno@lists.freedesktop.org
 +T:    git git://people.freedesktop.org/~robclark/linux
 +S:    Maintained
 +F:    drivers/gpu/drm/msm/
 +F:    include/uapi/drm/msm_drm.h
 +F:    Documentation/devicetree/bindings/display/msm/
 +
 +DRM DRIVER FOR NVIDIA GEFORCE/QUADRO GPUS
 +M:    Ben Skeggs <bskeggs@redhat.com>
 +L:    dri-devel@lists.freedesktop.org
 +L:    nouveau@lists.freedesktop.org
 +T:    git git://github.com/skeggsb/linux
 +S:    Supported
 +F:    drivers/gpu/drm/nouveau/
 +F:    include/uapi/drm/nouveau_drm.h
  
  DRM DRIVERS FOR NVIDIA TEGRA
  M:    Thierry Reding <thierry.reding@gmail.com>
 -M:    Terje Bergström <tbergstrom@nvidia.com>
  L:    dri-devel@lists.freedesktop.org
  L:    linux-tegra@vger.kernel.org
  T:    git git://anongit.freedesktop.org/tegra/linux.git
@@@ -4004,54 -3931,22 +4004,54 @@@ F:   include/linux/host1x.
  F:    include/uapi/drm/tegra_drm.h
  F:    Documentation/devicetree/bindings/display/tegra/nvidia,tegra20-host1x.txt
  
 +DRM DRIVER FOR MATROX G200/G400 GRAPHICS CARDS
 +S:    Orphan / Obsolete
 +F:    drivers/gpu/drm/mga/
 +F:    include/uapi/drm/mga_drm.h
 +
 +DRM DRIVER FOR MGA G200 SERVER GRAPHICS CHIPS
 +M:    Dave Airlie <airlied@redhat.com>
 +S:    Odd Fixes
 +F:    drivers/gpu/drm/mgag200/
 +
 +DRM DRIVER FOR RAGE 128 VIDEO CARDS
 +S:    Orphan / Obsolete
 +F:    drivers/gpu/drm/r128/
 +F:    include/uapi/drm/r128_drm.h
 +
  DRM DRIVERS FOR RENESAS
  M:    Laurent Pinchart <laurent.pinchart@ideasonboard.com>
  L:    dri-devel@lists.freedesktop.org
  L:    linux-renesas-soc@vger.kernel.org
 -T:    git git://people.freedesktop.org/~airlied/linux
 +T:    git git://linuxtv.org/pinchartl/fbdev
  S:    Supported
  F:    drivers/gpu/drm/rcar-du/
  F:    drivers/gpu/drm/shmobile/
  F:    include/linux/platform_data/shmob_drm.h
 +F:    Documentation/devicetree/bindings/display/renesas,du.txt
 +
 +DRM DRIVER FOR QXL VIRTUAL GPU
 +M:    Dave Airlie <airlied@redhat.com>
 +S:    Odd Fixes
 +F:    drivers/gpu/drm/qxl/
 +F:    include/uapi/drm/qxl_drm.h
  
  DRM DRIVERS FOR ROCKCHIP
  M:    Mark Yao <mark.yao@rock-chips.com>
  L:    dri-devel@lists.freedesktop.org
  S:    Maintained
  F:    drivers/gpu/drm/rockchip/
 -F:    Documentation/devicetree/bindings/display/rockchip*
 +F:    Documentation/devicetree/bindings/display/rockchip/
 +
 +DRM DRIVER FOR SAVAGE VIDEO CARDS
 +S:    Orphan / Obsolete
 +F:    drivers/gpu/drm/savage/
 +F:    include/uapi/drm/savage_drm.h
 +
 +DRM DRIVER FOR SIS VIDEO CARDS
 +S:    Orphan / Obsolete
 +F:    drivers/gpu/drm/sis/
 +F:    include/uapi/drm/sis_drm.h
  
  DRM DRIVERS FOR STI
  M:    Benjamin Gaignard <benjamin.gaignard@linaro.org>
@@@ -4062,43 -3957,14 +4062,43 @@@ S:   Maintaine
  F:    drivers/gpu/drm/sti
  F:    Documentation/devicetree/bindings/display/st,stih4xx.txt
  
 +DRM DRIVER FOR TDFX VIDEO CARDS
 +S:    Orphan / Obsolete
 +F:    drivers/gpu/drm/tdfx/
 +
 +DRM DRIVER FOR USB DISPLAYLINK VIDEO ADAPTERS
 +M:    Dave Airlie <airlied@redhat.com>
 +S:    Odd Fixes
 +F:    drivers/gpu/drm/udl/
 +
  DRM DRIVERS FOR VIVANTE GPU IP
  M:    Lucas Stach <l.stach@pengutronix.de>
  R:    Russell King <linux+etnaviv@armlinux.org.uk>
  R:    Christian Gmeiner <christian.gmeiner@gmail.com>
  L:    dri-devel@lists.freedesktop.org
  S:    Maintained
 -F:    drivers/gpu/drm/etnaviv
 -F:    Documentation/devicetree/bindings/display/etnaviv
 +F:    drivers/gpu/drm/etnaviv/
 +F:    include/uapi/drm/etnaviv_drm.h
 +F:    Documentation/devicetree/bindings/display/etnaviv/
 +
 +DRM DRIVER FOR VMWARE VIRTUAL GPU
 +M:    "VMware Graphics" <linux-graphics-maintainer@vmware.com>
 +M:    Sinclair Yeh <syeh@vmware.com>
 +M:    Thomas Hellstrom <thellstrom@vmware.com>
 +L:    dri-devel@lists.freedesktop.org
 +T:    git git://people.freedesktop.org/~syeh/repos_linux
 +T:    git git://people.freedesktop.org/~thomash/linux
 +S:    Supported
 +F:    drivers/gpu/drm/vmwgfx/
 +F:    include/uapi/drm/vmwgfx_drm.h
 +
 +DRM DRIVERS FOR VC4
 +M:    Eric Anholt <eric@anholt.net>
 +T:    git git://github.com/anholt/linux
 +S:    Supported
 +F:    drivers/gpu/drm/vc4/
 +F:    include/uapi/drm/vc4_drm.h
 +F:    Documentation/devicetree/bindings/display/brcm,bcm-vc4.txt
  
  DSBR100 USB FM RADIO DRIVER
  M:    Alexey Klimov <klimov.linux@gmail.com>
@@@ -4993,6 -4859,7 +4993,7 @@@ F:      drivers/scsi/gdt
  
  GDB KERNEL DEBUGGING HELPER SCRIPTS
  M:    Jan Kiszka <jan.kiszka@siemens.com>
+ M:    Kieran Bingham <kieran@bingham.xyz>
  S:    Supported
  F:    scripts/gdb/
  
@@@ -6567,8 -6434,6 +6568,8 @@@ S:      Maintaine
  F:    Documentation/security/keys.txt
  F:    include/linux/key.h
  F:    include/linux/key-type.h
 +F:    include/linux/keyctl.h
 +F:    include/uapi/linux/keyctl.h
  F:    include/keys/
  F:    security/keys/
  
@@@ -7141,8 -7006,6 +7142,8 @@@ MARVELL ARMADA DRM SUPPOR
  M:    Russell King <rmk+kernel@armlinux.org.uk>
  S:    Maintained
  F:    drivers/gpu/drm/armada/
 +F:    include/uapi/drm/armada_drm.h
 +F:    Documentation/devicetree/bindings/display/armada/
  
  MARVELL 88E6352 DSA support
  M:    Guenter Roeck <linux@roeck-us.net>
@@@ -8038,6 -7901,7 +8039,7 @@@ NILFS2 FILESYSTE
  M:    Ryusuke Konishi <konishi.ryusuke@lab.ntt.co.jp>
  L:    linux-nilfs@vger.kernel.org
  W:    http://nilfs.sourceforge.net/
+ W:    http://nilfs.osdn.jp/
  T:    git git://github.com/konis/nilfs2.git
  S:    Supported
  F:    Documentation/filesystems/nilfs2.txt

diff --combined arch/tile/configs/tilegx_defconfig
index dea47c31ab16fb22d26fcc7e3933e57a6b5c9ed5,48357a4e30a0b1722ae4602086ef5bb1a32bb1c9..fd122ef45b0043ce0c091dbe617040c95721cd49
@@@ -16,7 -16,6 +16,6 @@@ CONFIG_CGROUP_DEBUG=
  CONFIG_CGROUP_DEVICE=y
  CONFIG_CPUSETS=y
  CONFIG_CGROUP_CPUACCT=y
- CONFIG_RESOURCE_COUNTERS=y
  CONFIG_CGROUP_SCHED=y
  CONFIG_RT_GROUP_SCHED=y
  CONFIG_BLK_CGROUP=y
@@@ -89,6 -88,7 +88,6 @@@ CONFIG_TCP_CONG_YEAH=
  CONFIG_TCP_CONG_ILLINOIS=m
  CONFIG_TCP_MD5SIG=y
  CONFIG_IPV6=y
 -CONFIG_IPV6_PRIVACY=y
  CONFIG_IPV6_ROUTER_PREF=y
  CONFIG_IPV6_ROUTE_INFO=y
  CONFIG_IPV6_OPTIMISTIC_DAD=y

diff --combined arch/tile/configs/tilepro_defconfig
index 95743eedf7472ad4df4f470b313fc330e18b3941,a56c58424a22ee1043514aab2cc938041c72c240..eb6a55944191bc2ea03c834f1e034beb8d14d036
@@@ -15,7 -15,6 +15,6 @@@ CONFIG_CGROUP_DEBUG=
  CONFIG_CGROUP_DEVICE=y
  CONFIG_CPUSETS=y
  CONFIG_CGROUP_CPUACCT=y
- CONFIG_RESOURCE_COUNTERS=y
  CONFIG_CGROUP_SCHED=y
  CONFIG_RT_GROUP_SCHED=y
  CONFIG_BLK_CGROUP=y
@@@ -85,6 -84,7 +84,6 @@@ CONFIG_TCP_CONG_YEAH=
  CONFIG_TCP_CONG_ILLINOIS=m
  CONFIG_TCP_MD5SIG=y
  CONFIG_IPV6=y
 -CONFIG_IPV6_PRIVACY=y
  CONFIG_IPV6_ROUTER_PREF=y
  CONFIG_IPV6_ROUTE_INFO=y
  CONFIG_IPV6_OPTIMISTIC_DAD=y

diff --combined drivers/gpu/drm/i915/i915_gem.c
index 94bbc4314ac5a3a8190fda2f23da1143eb8f5c13,ddd1d78c346f39ee4e6953de40ea97267634105c..9b99490e8367a2563ebd136c2b7282d92348c502
  #include "i915_vgpu.h"
  #include "i915_trace.h"
  #include "intel_drv.h"
 +#include "intel_mocs.h"
  #include <linux/shmem_fs.h>
  #include <linux/slab.h>
  #include <linux/swap.h>
  #include <linux/pci.h>
  #include <linux/dma-buf.h>
  
 -#define RQ_BUG_ON(expr)
 -
  static void i915_gem_object_flush_gtt_write_domain(struct drm_i915_gem_object *obj);
  static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj);
  static void
@@@ -84,7 -85,9 +84,7 @@@ i915_gem_wait_for_error(struct i915_gpu
  {
        int ret;
  
 -#define EXIT_COND (!i915_reset_in_progress(error) || \
 -                 i915_terminally_wedged(error))
 -      if (EXIT_COND)
 +      if (!i915_reset_in_progress(error))
                return 0;
  
        /*
         * we should simply try to bail out and fail as gracefully as possible.
         */
        ret = wait_event_interruptible_timeout(error->reset_queue,
 -                                             EXIT_COND,
 +                                             !i915_reset_in_progress(error),
                                               10*HZ);
        if (ret == 0) {
                DRM_ERROR("Timed out waiting for the gpu reset to complete\n");
                return -EIO;
        } else if (ret < 0) {
                return ret;
 +      } else {
 +              return 0;
        }
 -#undef EXIT_COND
 -
 -      return 0;
  }
  
  int i915_mutex_lock_interruptible(struct drm_device *dev)
@@@ -126,9 -130,9 +126,9 @@@ in
  i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
                            struct drm_file *file)
  {
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct drm_i915_private *dev_priv = to_i915(dev);
 +      struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct drm_i915_gem_get_aperture *args = data;
 -      struct i915_gtt *ggtt = &dev_priv->gtt;
        struct i915_vma *vma;
        size_t pinned;
  
                        pinned += vma->node.size;
        mutex_unlock(&dev->struct_mutex);
  
 -      args->aper_size = dev_priv->gtt.base.total;
 +      args->aper_size = ggtt->base.total;
        args->aper_available_size = args->aper_size - pinned;
  
        return 0;
@@@ -207,10 -211,11 +207,10 @@@ i915_gem_object_put_pages_phys(struct d
        BUG_ON(obj->madv == __I915_MADV_PURGED);
  
        ret = i915_gem_object_set_to_cpu_domain(obj, true);
 -      if (ret) {
 +      if (WARN_ON(ret)) {
                /* In the event of a disaster, abandon all caches and
                 * hope for the best.
                 */
 -              WARN_ON(ret != -EIO);
                obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        }
  
@@@ -695,7 -700,7 +695,7 @@@ i915_gem_pread_ioctl(struct drm_device 
        if (ret)
                return ret;
  
 -      obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 +      obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
@@@ -760,8 -765,7 +760,8 @@@ i915_gem_gtt_pwrite_fast(struct drm_dev
                         struct drm_i915_gem_pwrite *args,
                         struct drm_file *file)
  {
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct drm_i915_private *dev_priv = to_i915(dev);
 +      struct i915_ggtt *ggtt = &dev_priv->ggtt;
        ssize_t remain;
        loff_t offset, page_base;
        char __user *user_data;
                 * source page isn't available.  Return the error and we'll
                 * retry in the slow path.
                 */
 -              if (fast_user_write(dev_priv->gtt.mappable, page_base,
 +              if (fast_user_write(ggtt->mappable, page_base,
                                    page_offset, user_data, page_length)) {
                        ret = -EFAULT;
                        goto out_flush;
@@@ -1049,7 -1053,7 +1049,7 @@@ i915_gem_pwrite_ioctl(struct drm_devic
        if (ret)
                goto put_rpm;
  
 -      obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 +      obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
@@@ -1105,19 -1109,27 +1105,19 @@@ put_rpm
        return ret;
  }
  
 -int
 -i915_gem_check_wedge(struct i915_gpu_error *error,
 -                   bool interruptible)
 +static int
 +i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
  {
 -      if (i915_reset_in_progress(error)) {
 +      if (__i915_terminally_wedged(reset_counter))
 +              return -EIO;
 +
 +      if (__i915_reset_in_progress(reset_counter)) {
                /* Non-interruptible callers can't handle -EAGAIN, hence return
                 * -EIO unconditionally for these. */
                if (!interruptible)
                        return -EIO;
  
 -              /* Recovery complete, but the reset failed ... */
 -              if (i915_terminally_wedged(error))
 -                      return -EIO;
 -
 -              /*
 -               * Check if GPU Reset is in progress - we need intel_ring_begin
 -               * to work properly to reinit the hw state while the gpu is
 -               * still marked as reset-in-progress. Handle this with a flag.
 -               */
 -              if (!error->reload_in_reset)
 -                      return -EAGAIN;
 +              return -EAGAIN;
        }
  
        return 0;
@@@ -1129,9 -1141,9 +1129,9 @@@ static void fake_irq(unsigned long data
  }
  
  static bool missed_irq(struct drm_i915_private *dev_priv,
 -                     struct intel_engine_cs *ring)
 +                     struct intel_engine_cs *engine)
  {
 -      return test_bit(ring->id, &dev_priv->gpu_error.missed_irq_rings);
 +      return test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings);
  }
  
  static unsigned long local_clock_us(unsigned *cpu)
@@@ -1181,7 -1193,7 +1181,7 @@@ static int __i915_spin_request(struct d
         * takes to sleep on a request, on the order of a microsecond.
         */
  
 -      if (req->ring->irq_refcount)
 +      if (req->engine->irq_refcount)
                return -EBUSY;
  
        /* Only spin if we know the GPU is processing this request */
  /**
   * __i915_wait_request - wait until execution of request has finished
   * @req: duh!
 - * @reset_counter: reset sequence associated with the given request
   * @interruptible: do an interruptible wait (normally yes)
   * @timeout: in - how long to wait (NULL forever); out - how much time remaining
   *
   * errno with remaining time filled in timeout argument.
   */
  int __i915_wait_request(struct drm_i915_gem_request *req,
 -                      unsigned reset_counter,
                        bool interruptible,
                        s64 *timeout,
                        struct intel_rps_client *rps)
  {
 -      struct intel_engine_cs *ring = i915_gem_request_get_ring(req);
 -      struct drm_device *dev = ring->dev;
 +      struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
 +      struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        const bool irq_test_in_progress =
 -              ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_ring_flag(ring);
 +              ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
        int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
        DEFINE_WAIT(wait);
        unsigned long timeout_expire;
        if (ret == 0)
                goto out;
  
 -      if (!irq_test_in_progress && WARN_ON(!ring->irq_get(ring))) {
 +      if (!irq_test_in_progress && WARN_ON(!engine->irq_get(engine))) {
                ret = -ENODEV;
                goto out;
        }
        for (;;) {
                struct timer_list timer;
  
 -              prepare_to_wait(&ring->irq_queue, &wait, state);
 +              prepare_to_wait(&engine->irq_queue, &wait, state);
  
                /* We need to check whether any gpu reset happened in between
 -               * the caller grabbing the seqno and now ... */
 -              if (reset_counter != atomic_read(&dev_priv->gpu_error.reset_counter)) {
 -                      /* ... but upgrade the -EAGAIN to an -EIO if the gpu
 -                       * is truely gone. */
 -                      ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
 -                      if (ret == 0)
 -                              ret = -EAGAIN;
 +               * the request being submitted and now. If a reset has occurred,
 +               * the request is effectively complete (we either are in the
 +               * process of or have discarded the rendering and completely
 +               * reset the GPU. The results of the request are lost and we
 +               * are free to continue on with the original operation.
 +               */
 +              if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
 +                      ret = 0;
                        break;
                }
  
                }
  
                timer.function = NULL;
 -              if (timeout || missed_irq(dev_priv, ring)) {
 +              if (timeout || missed_irq(dev_priv, engine)) {
                        unsigned long expire;
  
                        setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
 -                      expire = missed_irq(dev_priv, ring) ? jiffies + 1 : timeout_expire;
 +                      expire = missed_irq(dev_priv, engine) ? jiffies + 1 : timeout_expire;
                        mod_timer(&timer, expire);
                }
  
                }
        }
        if (!irq_test_in_progress)
 -              ring->irq_put(ring);
 +              engine->irq_put(engine);
  
 -      finish_wait(&ring->irq_queue, &wait);
 +      finish_wait(&engine->irq_queue, &wait);
  
  out:
        trace_i915_gem_request_wait_end(req);
  int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
                                   struct drm_file *file)
  {
 -      struct drm_i915_private *dev_private;
        struct drm_i915_file_private *file_priv;
  
        WARN_ON(!req || !file || req->file_priv);
        if (req->file_priv)
                return -EINVAL;
  
 -      dev_private = req->ring->dev->dev_private;
        file_priv = file->driver_priv;
  
        spin_lock(&file_priv->mm.lock);
@@@ -1419,7 -1434,7 +1419,7 @@@ static void i915_gem_request_retire(str
  static void
  __i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
  {
 -      struct intel_engine_cs *engine = req->ring;
 +      struct intel_engine_cs *engine = req->engine;
        struct drm_i915_gem_request *tmp;
  
        lockdep_assert_held(&engine->dev->struct_mutex);
  int
  i915_wait_request(struct drm_i915_gem_request *req)
  {
 -      struct drm_device *dev;
 -      struct drm_i915_private *dev_priv;
 +      struct drm_i915_private *dev_priv = req->i915;
        bool interruptible;
        int ret;
  
 -      BUG_ON(req == NULL);
 -
 -      dev = req->ring->dev;
 -      dev_priv = dev->dev_private;
        interruptible = dev_priv->mm.interruptible;
  
 -      BUG_ON(!mutex_is_locked(&dev->struct_mutex));
 -
 -      ret = i915_gem_check_wedge(&dev_priv->gpu_error, interruptible);
 -      if (ret)
 -              return ret;
 +      BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
  
 -      ret = __i915_wait_request(req,
 -                                atomic_read(&dev_priv->gpu_error.reset_counter),
 -                                interruptible, NULL, NULL);
 +      ret = __i915_wait_request(req, interruptible, NULL, NULL);
        if (ret)
                return ret;
  
@@@ -1479,14 -1505,14 +1479,14 @@@ i915_gem_object_wait_rendering(struct d
                        if (ret)
                                return ret;
  
 -                      i = obj->last_write_req->ring->id;
 +                      i = obj->last_write_req->engine->id;
                        if (obj->last_read_req[i] == obj->last_write_req)
                                i915_gem_object_retire__read(obj, i);
                        else
                                i915_gem_object_retire__write(obj);
                }
        } else {
 -              for (i = 0; i < I915_NUM_RINGS; i++) {
 +              for (i = 0; i < I915_NUM_ENGINES; i++) {
                        if (obj->last_read_req[i] == NULL)
                                continue;
  
  
                        i915_gem_object_retire__read(obj, i);
                }
 -              RQ_BUG_ON(obj->active);
 +              GEM_BUG_ON(obj->active);
        }
  
        return 0;
@@@ -1506,7 -1532,7 +1506,7 @@@ static voi
  i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
                               struct drm_i915_gem_request *req)
  {
 -      int ring = req->ring->id;
 +      int ring = req->engine->id;
  
        if (obj->last_read_req[ring] == req)
                i915_gem_object_retire__read(obj, ring);
@@@ -1526,7 -1552,8 +1526,7 @@@ i915_gem_object_wait_rendering__nonbloc
  {
        struct drm_device *dev = obj->base.dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct drm_i915_gem_request *requests[I915_NUM_RINGS];
 -      unsigned reset_counter;
 +      struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
        int ret, i, n = 0;
  
        BUG_ON(!mutex_is_locked(&dev->struct_mutex));
        if (!obj->active)
                return 0;
  
 -      ret = i915_gem_check_wedge(&dev_priv->gpu_error, true);
 -      if (ret)
 -              return ret;
 -
 -      reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
 -
        if (readonly) {
                struct drm_i915_gem_request *req;
  
  
                requests[n++] = i915_gem_request_reference(req);
        } else {
 -              for (i = 0; i < I915_NUM_RINGS; i++) {
 +              for (i = 0; i < I915_NUM_ENGINES; i++) {
                        struct drm_i915_gem_request *req;
  
                        req = obj->last_read_req[i];
        }
  
        mutex_unlock(&dev->struct_mutex);
 +      ret = 0;
        for (i = 0; ret == 0 && i < n; i++)
 -              ret = __i915_wait_request(requests[i], reset_counter, true,
 -                                        NULL, rps);
 +              ret = __i915_wait_request(requests[i], true, NULL, rps);
        mutex_lock(&dev->struct_mutex);
  
        for (i = 0; i < n; i++) {
@@@ -1607,7 -1640,7 +1607,7 @@@ i915_gem_set_domain_ioctl(struct drm_de
        if (ret)
                return ret;
  
 -      obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 +      obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
@@@ -1655,7 -1688,7 +1655,7 @@@ i915_gem_sw_finish_ioctl(struct drm_dev
        if (ret)
                return ret;
  
 -      obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 +      obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
@@@ -1702,7 -1735,7 +1702,7 @@@ i915_gem_mmap_ioctl(struct drm_device *
        if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
                return -ENODEV;
  
 -      obj = drm_gem_object_lookup(dev, file, args->handle);
 +      obj = drm_gem_object_lookup(file, args->handle);
        if (obj == NULL)
                return -ENOENT;
  
                struct mm_struct *mm = current->mm;
                struct vm_area_struct *vma;
  
-               down_write(&mm->mmap_sem);
+               if (down_write_killable(&mm->mmap_sem)) {
+                       drm_gem_object_unreference_unlocked(obj);
+                       return -EINTR;
+               }
                vma = find_vma(mm, addr);
                if (vma)
                        vma->vm_page_prot =
@@@ -1759,8 -1795,7 +1762,8 @@@ int i915_gem_fault(struct vm_area_struc
  {
        struct drm_i915_gem_object *obj = to_intel_bo(vma->vm_private_data);
        struct drm_device *dev = obj->base.dev;
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct drm_i915_private *dev_priv = to_i915(dev);
 +      struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct i915_ggtt_view view = i915_ggtt_view_normal;
        pgoff_t page_offset;
        unsigned long pfn;
        }
  
        /* Use a partial view if the object is bigger than the aperture. */
 -      if (obj->base.size >= dev_priv->gtt.mappable_end &&
 +      if (obj->base.size >= ggtt->mappable_end &&
            obj->tiling_mode == I915_TILING_NONE) {
                static const unsigned int chunk_size = 256; // 1 MiB
  
                goto unpin;
  
        /* Finally, remap it using the new GTT offset */
 -      pfn = dev_priv->gtt.mappable_base +
 +      pfn = ggtt->mappable_base +
                i915_gem_obj_ggtt_offset_view(obj, &view);
        pfn >>= PAGE_SHIFT;
  
  void
  i915_gem_release_mmap(struct drm_i915_gem_object *obj)
  {
 +      /* Serialisation between user GTT access and our code depends upon
 +       * revoking the CPU's PTE whilst the mutex is held. The next user
 +       * pagefault then has to wait until we release the mutex.
 +       */
 +      lockdep_assert_held(&obj->base.dev->struct_mutex);
 +
        if (!obj->fault_mappable)
                return;
  
        drm_vma_node_unmap(&obj->base.vma_node,
                           obj->base.dev->anon_inode->i_mapping);
 +
 +      /* Ensure that the CPU's PTE are revoked and there are not outstanding
 +       * memory transactions from userspace before we return. The TLB
 +       * flushing implied above by changing the PTE above *should* be
 +       * sufficient, an extra barrier here just provides us with a bit
 +       * of paranoid documentation about our requirement to serialise
 +       * memory writes before touching registers / GSM.
 +       */
 +      wmb();
 +
        obj->fault_mappable = false;
  }
  
@@@ -2017,6 -2036,9 +2020,6 @@@ static int i915_gem_object_create_mmap_
        struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
        int ret;
  
 -      if (drm_vma_node_has_offset(&obj->base.vma_node))
 -              return 0;
 -
        dev_priv->mm.shrinker_no_lock_stealing = true;
  
        ret = drm_gem_create_mmap_offset(&obj->base);
@@@ -2065,7 -2087,7 +2068,7 @@@ i915_gem_mmap_gtt(struct drm_file *file
        if (ret)
                return ret;
  
 -      obj = to_intel_bo(drm_gem_object_lookup(dev, file, handle));
 +      obj = to_intel_bo(drm_gem_object_lookup(file, handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
@@@ -2161,10 -2183,11 +2164,10 @@@ i915_gem_object_put_pages_gtt(struct dr
        BUG_ON(obj->madv == __I915_MADV_PURGED);
  
        ret = i915_gem_object_set_to_cpu_domain(obj, true);
 -      if (ret) {
 +      if (WARN_ON(ret)) {
                /* In the event of a disaster, abandon all caches and
                 * hope for the best.
                 */
 -              WARN_ON(ret != -EIO);
                i915_gem_clflush_object(obj, true);
                obj->base.read_domains = obj->base.write_domain = I915_GEM_DOMAIN_CPU;
        }
@@@ -2212,14 -2235,6 +2215,14 @@@ i915_gem_object_put_pages(struct drm_i9
         * lists early. */
        list_del(&obj->global_list);
  
 +      if (obj->mapping) {
 +              if (is_vmalloc_addr(obj->mapping))
 +                      vunmap(obj->mapping);
 +              else
 +                      kunmap(kmap_to_page(obj->mapping));
 +              obj->mapping = NULL;
 +      }
 +
        ops->put_pages(obj);
        obj->pages = NULL;
  
@@@ -2388,64 -2403,21 +2391,64 @@@ i915_gem_object_get_pages(struct drm_i9
        return 0;
  }
  
 +void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
 +{
 +      int ret;
 +
 +      lockdep_assert_held(&obj->base.dev->struct_mutex);
 +
 +      ret = i915_gem_object_get_pages(obj);
 +      if (ret)
 +              return ERR_PTR(ret);
 +
 +      i915_gem_object_pin_pages(obj);
 +
 +      if (obj->mapping == NULL) {
 +              struct page **pages;
 +
 +              pages = NULL;
 +              if (obj->base.size == PAGE_SIZE)
 +                      obj->mapping = kmap(sg_page(obj->pages->sgl));
 +              else
 +                      pages = drm_malloc_gfp(obj->base.size >> PAGE_SHIFT,
 +                                             sizeof(*pages),
 +                                             GFP_TEMPORARY);
 +              if (pages != NULL) {
 +                      struct sg_page_iter sg_iter;
 +                      int n;
 +
 +                      n = 0;
 +                      for_each_sg_page(obj->pages->sgl, &sg_iter,
 +                                       obj->pages->nents, 0)
 +                              pages[n++] = sg_page_iter_page(&sg_iter);
 +
 +                      obj->mapping = vmap(pages, n, 0, PAGE_KERNEL);
 +                      drm_free_large(pages);
 +              }
 +              if (obj->mapping == NULL) {
 +                      i915_gem_object_unpin_pages(obj);
 +                      return ERR_PTR(-ENOMEM);
 +              }
 +      }
 +
 +      return obj->mapping;
 +}
 +
  void i915_vma_move_to_active(struct i915_vma *vma,
                             struct drm_i915_gem_request *req)
  {
        struct drm_i915_gem_object *obj = vma->obj;
 -      struct intel_engine_cs *ring;
 +      struct intel_engine_cs *engine;
  
 -      ring = i915_gem_request_get_ring(req);
 +      engine = i915_gem_request_get_engine(req);
  
        /* Add a reference if we're newly entering the active list. */
        if (obj->active == 0)
                drm_gem_object_reference(&obj->base);
 -      obj->active |= intel_ring_flag(ring);
 +      obj->active |= intel_engine_flag(engine);
  
 -      list_move_tail(&obj->ring_list[ring->id], &ring->active_list);
 -      i915_gem_request_assign(&obj->last_read_req[ring->id], req);
 +      list_move_tail(&obj->engine_list[engine->id], &engine->active_list);
 +      i915_gem_request_assign(&obj->last_read_req[engine->id], req);
  
        list_move_tail(&vma->vm_link, &vma->vm->active_list);
  }
  static void
  i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
  {
 -      RQ_BUG_ON(obj->last_write_req == NULL);
 -      RQ_BUG_ON(!(obj->active & intel_ring_flag(obj->last_write_req->ring)));
 +      GEM_BUG_ON(obj->last_write_req == NULL);
 +      GEM_BUG_ON(!(obj->active & intel_engine_flag(obj->last_write_req->engine)));
  
        i915_gem_request_assign(&obj->last_write_req, NULL);
        intel_fb_obj_flush(obj, true, ORIGIN_CS);
@@@ -2465,13 -2437,13 +2468,13 @@@ i915_gem_object_retire__read(struct drm
  {
        struct i915_vma *vma;
  
 -      RQ_BUG_ON(obj->last_read_req[ring] == NULL);
 -      RQ_BUG_ON(!(obj->active & (1 << ring)));
 +      GEM_BUG_ON(obj->last_read_req[ring] == NULL);
 +      GEM_BUG_ON(!(obj->active & (1 << ring)));
  
 -      list_del_init(&obj->ring_list[ring]);
 +      list_del_init(&obj->engine_list[ring]);
        i915_gem_request_assign(&obj->last_read_req[ring], NULL);
  
 -      if (obj->last_write_req && obj->last_write_req->ring->id == ring)
 +      if (obj->last_write_req && obj->last_write_req->engine->id == ring)
                i915_gem_object_retire__write(obj);
  
        obj->active &= ~(1 << ring);
@@@ -2498,20 -2470,24 +2501,20 @@@ static in
  i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct intel_engine_cs *ring;
 -      int ret, i, j;
 +      struct intel_engine_cs *engine;
 +      int ret;
  
        /* Carefully retire all requests without writing to the rings */
 -      for_each_ring(ring, dev_priv, i) {
 -              ret = intel_ring_idle(ring);
 +      for_each_engine(engine, dev_priv) {
 +              ret = intel_engine_idle(engine);
                if (ret)
                        return ret;
        }
        i915_gem_retire_requests(dev);
  
        /* Finally reset hw state */
 -      for_each_ring(ring, dev_priv, i) {
 -              intel_ring_init_seqno(ring, seqno);
 -
 -              for (j = 0; j < ARRAY_SIZE(ring->semaphore.sync_seqno); j++)
 -                      ring->semaphore.sync_seqno[j] = 0;
 -      }
 +      for_each_engine(engine, dev_priv)
 +              intel_ring_init_seqno(engine, seqno);
  
        return 0;
  }
@@@ -2569,7 -2545,7 +2572,7 @@@ void __i915_add_request(struct drm_i915
                        struct drm_i915_gem_object *obj,
                        bool flush_caches)
  {
 -      struct intel_engine_cs *ring;
 +      struct intel_engine_cs *engine;
        struct drm_i915_private *dev_priv;
        struct intel_ringbuffer *ringbuf;
        u32 request_start;
        if (WARN_ON(request == NULL))
                return;
  
 -      ring = request->ring;
 -      dev_priv = ring->dev->dev_private;
 +      engine = request->engine;
 +      dev_priv = request->i915;
        ringbuf = request->ringbuf;
  
        /*
                WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
        }
  
 +      trace_i915_gem_request_add(request);
 +
 +      request->head = request_start;
 +
 +      /* Whilst this request exists, batch_obj will be on the
 +       * active_list, and so will hold the active reference. Only when this
 +       * request is retired will the the batch_obj be moved onto the
 +       * inactive_list and lose its active reference. Hence we do not need
 +       * to explicitly hold another reference here.
 +       */
 +      request->batch_obj = obj;
 +
 +      /* Seal the request and mark it as pending execution. Note that
 +       * we may inspect this state, without holding any locks, during
 +       * hangcheck. Hence we apply the barrier to ensure that we do not
 +       * see a more recent value in the hws than we are tracking.
 +       */
 +      request->emitted_jiffies = jiffies;
 +      request->previous_seqno = engine->last_submitted_seqno;
 +      smp_store_mb(engine->last_submitted_seqno, request->seqno);
 +      list_add_tail(&request->list, &engine->request_list);
 +
        /* Record the position of the start of the request so that
         * should we detect the updated seqno part-way through the
         * GPU processing the request, we never over-estimate the
        request->postfix = intel_ring_get_tail(ringbuf);
  
        if (i915.enable_execlists)
 -              ret = ring->emit_request(request);
 +              ret = engine->emit_request(request);
        else {
 -              ret = ring->add_request(request);
 +              ret = engine->add_request(request);
  
                request->tail = intel_ring_get_tail(ringbuf);
        }
        /* Not allowed to fail! */
        WARN(ret, "emit|add_request failed: %d!\n", ret);
  
 -      request->head = request_start;
 -
 -      /* Whilst this request exists, batch_obj will be on the
 -       * active_list, and so will hold the active reference. Only when this
 -       * request is retired will the the batch_obj be moved onto the
 -       * inactive_list and lose its active reference. Hence we do not need
 -       * to explicitly hold another reference here.
 -       */
 -      request->batch_obj = obj;
 -
 -      request->emitted_jiffies = jiffies;
 -      request->previous_seqno = ring->last_submitted_seqno;
 -      ring->last_submitted_seqno = request->seqno;
 -      list_add_tail(&request->list, &ring->request_list);
 -
 -      trace_i915_gem_request_add(request);
 -
 -      i915_queue_hangcheck(ring->dev);
 +      i915_queue_hangcheck(engine->dev);
  
        queue_delayed_work(dev_priv->wq,
                           &dev_priv->mm.retire_work,
@@@ -2712,7 -2683,7 +2715,7 @@@ void i915_gem_request_free(struct kref 
  
        if (ctx) {
                if (i915.enable_execlists && ctx != req->i915->kernel_context)
 -                      intel_lr_context_unpin(ctx, req->ring);
 +                      intel_lr_context_unpin(ctx, req->engine);
  
                i915_gem_context_unreference(ctx);
        }
  }
  
  static inline int
 -__i915_gem_request_alloc(struct intel_engine_cs *ring,
 +__i915_gem_request_alloc(struct intel_engine_cs *engine,
                         struct intel_context *ctx,
                         struct drm_i915_gem_request **req_out)
  {
 -      struct drm_i915_private *dev_priv = to_i915(ring->dev);
 +      struct drm_i915_private *dev_priv = to_i915(engine->dev);
 +      unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
        struct drm_i915_gem_request *req;
        int ret;
  
  
        *req_out = NULL;
  
 +      /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
 +       * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
 +       * and restart.
 +       */
 +      ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
 +      if (ret)
 +              return ret;
 +
        req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
        if (req == NULL)
                return -ENOMEM;
  
 -      ret = i915_gem_get_seqno(ring->dev, &req->seqno);
 +      ret = i915_gem_get_seqno(engine->dev, &req->seqno);
        if (ret)
                goto err;
  
        kref_init(&req->ref);
        req->i915 = dev_priv;
 -      req->ring = ring;
 +      req->engine = engine;
 +      req->reset_counter = reset_counter;
        req->ctx  = ctx;
        i915_gem_context_reference(req->ctx);
  
                 * fully prepared. Thus it can be cleaned up using the proper
                 * free code.
                 */
 -              i915_gem_request_cancel(req);
 +              intel_ring_reserved_space_cancel(req->ringbuf);
 +              i915_gem_request_unreference(req);
                return ret;
        }
  
@@@ -2822,12 -2782,19 +2825,12 @@@ i915_gem_request_alloc(struct intel_eng
        return err ? ERR_PTR(err) : req;
  }
  
 -void i915_gem_request_cancel(struct drm_i915_gem_request *req)
 -{
 -      intel_ring_reserved_space_cancel(req->ringbuf);
 -
 -      i915_gem_request_unreference(req);
 -}
 -
  struct drm_i915_gem_request *
 -i915_gem_find_active_request(struct intel_engine_cs *ring)
 +i915_gem_find_active_request(struct intel_engine_cs *engine)
  {
        struct drm_i915_gem_request *request;
  
 -      list_for_each_entry(request, &ring->request_list, list) {
 +      list_for_each_entry(request, &engine->request_list, list) {
                if (i915_gem_request_completed(request, false))
                        continue;
  
        return NULL;
  }
  
 -static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
 -                                     struct intel_engine_cs *ring)
 +static void i915_gem_reset_engine_status(struct drm_i915_private *dev_priv,
 +                                     struct intel_engine_cs *engine)
  {
        struct drm_i915_gem_request *request;
        bool ring_hung;
  
 -      request = i915_gem_find_active_request(ring);
 +      request = i915_gem_find_active_request(engine);
  
        if (request == NULL)
                return;
  
 -      ring_hung = ring->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
 +      ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
  
        i915_set_reset_status(dev_priv, request->ctx, ring_hung);
  
 -      list_for_each_entry_continue(request, &ring->request_list, list)
 +      list_for_each_entry_continue(request, &engine->request_list, list)
                i915_set_reset_status(dev_priv, request->ctx, false);
  }
  
 -static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
 -                                      struct intel_engine_cs *ring)
 +static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
 +                                      struct intel_engine_cs *engine)
  {
        struct intel_ringbuffer *buffer;
  
 -      while (!list_empty(&ring->active_list)) {
 +      while (!list_empty(&engine->active_list)) {
                struct drm_i915_gem_object *obj;
  
 -              obj = list_first_entry(&ring->active_list,
 +              obj = list_first_entry(&engine->active_list,
                                       struct drm_i915_gem_object,
 -                                     ring_list[ring->id]);
 +                                     engine_list[engine->id]);
  
 -              i915_gem_object_retire__read(obj, ring->id);
 +              i915_gem_object_retire__read(obj, engine->id);
        }
  
        /*
         */
  
        if (i915.enable_execlists) {
 -              spin_lock_irq(&ring->execlist_lock);
 +              /* Ensure irq handler finishes or is cancelled. */
 +              tasklet_kill(&engine->irq_tasklet);
  
 +              spin_lock_bh(&engine->execlist_lock);
                /* list_splice_tail_init checks for empty lists */
 -              list_splice_tail_init(&ring->execlist_queue,
 -                                    &ring->execlist_retired_req_list);
 +              list_splice_tail_init(&engine->execlist_queue,
 +                                    &engine->execlist_retired_req_list);
 +              spin_unlock_bh(&engine->execlist_lock);
  
 -              spin_unlock_irq(&ring->execlist_lock);
 -              intel_execlists_retire_requests(ring);
 +              intel_execlists_retire_requests(engine);
        }
  
        /*
         * implicit references on things like e.g. ppgtt address spaces through
         * the request.
         */
 -      while (!list_empty(&ring->request_list)) {
 +      while (!list_empty(&engine->request_list)) {
                struct drm_i915_gem_request *request;
  
 -              request = list_first_entry(&ring->request_list,
 +              request = list_first_entry(&engine->request_list,
                                           struct drm_i915_gem_request,
                                           list);
  
         * upon reset is less than when we start. Do one more pass over
         * all the ringbuffers to reset last_retired_head.
         */
 -      list_for_each_entry(buffer, &ring->buffers, link) {
 +      list_for_each_entry(buffer, &engine->buffers, link) {
                buffer->last_retired_head = buffer->tail;
                intel_ring_update_space(buffer);
        }
 +
 +      intel_ring_init_seqno(engine, engine->last_submitted_seqno);
  }
  
  void i915_gem_reset(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct intel_engine_cs *ring;
 -      int i;
 +      struct intel_engine_cs *engine;
  
        /*
         * Before we free the objects from the requests, we need to inspect
         * them for finding the guilty party. As the requests only borrow
         * their reference to the objects, the inspection must be done first.
         */
 -      for_each_ring(ring, dev_priv, i)
 -              i915_gem_reset_ring_status(dev_priv, ring);
 +      for_each_engine(engine, dev_priv)
 +              i915_gem_reset_engine_status(dev_priv, engine);
  
 -      for_each_ring(ring, dev_priv, i)
 -              i915_gem_reset_ring_cleanup(dev_priv, ring);
 +      for_each_engine(engine, dev_priv)
 +              i915_gem_reset_engine_cleanup(dev_priv, engine);
  
        i915_gem_context_reset(dev);
  
   * This function clears the request list as sequence numbers are passed.
   */
  void
 -i915_gem_retire_requests_ring(struct intel_engine_cs *ring)
 +i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
  {
 -      WARN_ON(i915_verify_lists(ring->dev));
 +      WARN_ON(i915_verify_lists(engine->dev));
  
        /* Retire requests first as we use it above for the early return.
         * If we retire requests last, we may use a later seqno and so clear
         * the requests lists without clearing the active list, leading to
         * confusion.
         */
 -      while (!list_empty(&ring->request_list)) {
 +      while (!list_empty(&engine->request_list)) {
                struct drm_i915_gem_request *request;
  
 -              request = list_first_entry(&ring->request_list,
 +              request = list_first_entry(&engine->request_list,
                                           struct drm_i915_gem_request,
                                           list);
  
         * by the ringbuffer to the flushing/inactive lists as appropriate,
         * before we free the context associated with the requests.
         */
 -      while (!list_empty(&ring->active_list)) {
 +      while (!list_empty(&engine->active_list)) {
                struct drm_i915_gem_object *obj;
  
 -              obj = list_first_entry(&ring->active_list,
 -                                    struct drm_i915_gem_object,
 -                                    ring_list[ring->id]);
 +              obj = list_first_entry(&engine->active_list,
 +                                     struct drm_i915_gem_object,
 +                                     engine_list[engine->id]);
  
 -              if (!list_empty(&obj->last_read_req[ring->id]->list))
 +              if (!list_empty(&obj->last_read_req[engine->id]->list))
                        break;
  
 -              i915_gem_object_retire__read(obj, ring->id);
 +              i915_gem_object_retire__read(obj, engine->id);
        }
  
 -      if (unlikely(ring->trace_irq_req &&
 -                   i915_gem_request_completed(ring->trace_irq_req, true))) {
 -              ring->irq_put(ring);
 -              i915_gem_request_assign(&ring->trace_irq_req, NULL);
 +      if (unlikely(engine->trace_irq_req &&
 +                   i915_gem_request_completed(engine->trace_irq_req, true))) {
 +              engine->irq_put(engine);
 +              i915_gem_request_assign(&engine->trace_irq_req, NULL);
        }
  
 -      WARN_ON(i915_verify_lists(ring->dev));
 +      WARN_ON(i915_verify_lists(engine->dev));
  }
  
  bool
  i915_gem_retire_requests(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct intel_engine_cs *ring;
 +      struct intel_engine_cs *engine;
        bool idle = true;
 -      int i;
  
 -      for_each_ring(ring, dev_priv, i) {
 -              i915_gem_retire_requests_ring(ring);
 -              idle &= list_empty(&ring->request_list);
 +      for_each_engine(engine, dev_priv) {
 +              i915_gem_retire_requests_ring(engine);
 +              idle &= list_empty(&engine->request_list);
                if (i915.enable_execlists) {
 -                      spin_lock_irq(&ring->execlist_lock);
 -                      idle &= list_empty(&ring->execlist_queue);
 -                      spin_unlock_irq(&ring->execlist_lock);
 +                      spin_lock_bh(&engine->execlist_lock);
 +                      idle &= list_empty(&engine->execlist_queue);
 +                      spin_unlock_bh(&engine->execlist_lock);
  
 -                      intel_execlists_retire_requests(ring);
 +                      intel_execlists_retire_requests(engine);
                }
        }
  
@@@ -3049,21 -3014,25 +3052,21 @@@ i915_gem_idle_work_handler(struct work_
        struct drm_i915_private *dev_priv =
                container_of(work, typeof(*dev_priv), mm.idle_work.work);
        struct drm_device *dev = dev_priv->dev;
 -      struct intel_engine_cs *ring;
 -      int i;
 +      struct intel_engine_cs *engine;
  
 -      for_each_ring(ring, dev_priv, i)
 -              if (!list_empty(&ring->request_list))
 +      for_each_engine(engine, dev_priv)
 +              if (!list_empty(&engine->request_list))
                        return;
  
        /* we probably should sync with hangcheck here, using cancel_work_sync.
 -       * Also locking seems to be fubar here, ring->request_list is protected
 +       * Also locking seems to be fubar here, engine->request_list is protected
         * by dev->struct_mutex. */
  
        intel_mark_idle(dev);
  
        if (mutex_trylock(&dev->struct_mutex)) {
 -              struct intel_engine_cs *ring;
 -              int i;
 -
 -              for_each_ring(ring, dev_priv, i)
 -                      i915_gem_batch_pool_fini(&ring->batch_pool);
 +              for_each_engine(engine, dev_priv)
 +                      i915_gem_batch_pool_fini(&engine->batch_pool);
  
                mutex_unlock(&dev->struct_mutex);
        }
@@@ -3082,7 -3051,7 +3085,7 @@@ i915_gem_object_flush_active(struct drm
        if (!obj->active)
                return 0;
  
 -      for (i = 0; i < I915_NUM_RINGS; i++) {
 +      for (i = 0; i < I915_NUM_ENGINES; i++) {
                struct drm_i915_gem_request *req;
  
                req = obj->last_read_req[i];
@@@ -3127,9 -3096,11 +3130,9 @@@ retire
  int
  i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
  {
 -      struct drm_i915_private *dev_priv = dev->dev_private;
        struct drm_i915_gem_wait *args = data;
        struct drm_i915_gem_object *obj;
 -      struct drm_i915_gem_request *req[I915_NUM_RINGS];
 -      unsigned reset_counter;
 +      struct drm_i915_gem_request *req[I915_NUM_ENGINES];
        int i, n = 0;
        int ret;
  
        if (ret)
                return ret;
  
 -      obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->bo_handle));
 +      obj = to_intel_bo(drm_gem_object_lookup(file, args->bo_handle));
        if (&obj->base == NULL) {
                mutex_unlock(&dev->struct_mutex);
                return -ENOENT;
        }
  
        drm_gem_object_unreference(&obj->base);
 -      reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
  
 -      for (i = 0; i < I915_NUM_RINGS; i++) {
 +      for (i = 0; i < I915_NUM_ENGINES; i++) {
                if (obj->last_read_req[i] == NULL)
                        continue;
  
  
        for (i = 0; i < n; i++) {
                if (ret == 0)
 -                      ret = __i915_wait_request(req[i], reset_counter, true,
 +                      ret = __i915_wait_request(req[i], true,
                                                  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
                                                  to_rps_client(file));
                i915_gem_request_unreference__unlocked(req[i]);
@@@ -3197,7 -3169,7 +3200,7 @@@ __i915_gem_object_sync(struct drm_i915_
        struct intel_engine_cs *from;
        int ret;
  
 -      from = i915_gem_request_get_ring(from_req);
 +      from = i915_gem_request_get_engine(from_req);
        if (to == from)
                return 0;
  
        if (!i915_semaphore_is_enabled(obj->base.dev)) {
                struct drm_i915_private *i915 = to_i915(obj->base.dev);
                ret = __i915_wait_request(from_req,
 -                                        atomic_read(&i915->gpu_error.reset_counter),
                                          i915->mm.interruptible,
                                          NULL,
                                          &i915->rps.semaphores);
@@@ -3290,7 -3263,7 +3293,7 @@@ i915_gem_object_sync(struct drm_i915_ge
                     struct drm_i915_gem_request **to_req)
  {
        const bool readonly = obj->base.pending_write_domain == 0;
 -      struct drm_i915_gem_request *req[I915_NUM_RINGS];
 +      struct drm_i915_gem_request *req[I915_NUM_ENGINES];
        int ret, i, n;
  
        if (!obj->active)
                if (obj->last_write_req)
                        req[n++] = obj->last_write_req;
        } else {
 -              for (i = 0; i < I915_NUM_RINGS; i++)
 +              for (i = 0; i < I915_NUM_ENGINES; i++)
                        if (obj->last_read_req[i])
                                req[n++] = obj->last_read_req[i];
        }
@@@ -3327,6 -3300,9 +3330,6 @@@ static void i915_gem_object_finish_gtt(
        if ((obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0)
                return;
  
 -      /* Wait for any direct GTT access to complete */
 -      mb();
 -
        old_read_domains = obj->base.read_domains;
        old_write_domain = obj->base.write_domain;
  
@@@ -3418,25 -3394,28 +3421,25 @@@ int __i915_vma_unbind_no_wait(struct i9
  int i915_gpu_idle(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct intel_engine_cs *ring;
 -      int ret, i;
 +      struct intel_engine_cs *engine;
 +      int ret;
  
        /* Flush everything onto the inactive list. */
 -      for_each_ring(ring, dev_priv, i) {
 +      for_each_engine(engine, dev_priv) {
                if (!i915.enable_execlists) {
                        struct drm_i915_gem_request *req;
  
 -                      req = i915_gem_request_alloc(ring, NULL);
 +                      req = i915_gem_request_alloc(engine, NULL);
                        if (IS_ERR(req))
                                return PTR_ERR(req);
  
                        ret = i915_switch_context(req);
 -                      if (ret) {
 -                              i915_gem_request_cancel(req);
 -                              return ret;
 -                      }
 -
                        i915_add_request_no_flush(req);
 +                      if (ret)
 +                              return ret;
                }
  
 -              ret = intel_ring_idle(ring);
 +              ret = intel_engine_idle(engine);
                if (ret)
                        return ret;
        }
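
Besides the rename, the i915_gpu_idle() hunk reorders its error path: once a request has been allocated it is now always submitted via i915_add_request_no_flush(), and the context-switch error is checked only afterwards, instead of cancelling the request. A hedged, generic sketch of that "always commit the allocated work item, then report the error" shape, using invented stub helpers rather than the driver's API:

/* Illustrative sketch with stub helpers; mirrors the reordered error path. */
struct work_item_stub { int id; };

static struct work_item_stub *alloc_item(void) { static struct work_item_stub w; return &w; }
static int do_setup(struct work_item_stub *w) { (void)w; return 0; }
static void submit_item(struct work_item_stub *w) { (void)w; }

static int run_one(void)
{
	struct work_item_stub *item = alloc_item();
	int ret;

	ret = do_setup(item);
	/* Submit unconditionally: a built item is no longer cancelled,
	 * so any setup error is propagated only after the submit. */
	submit_item(item);
	return ret;
}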
@@@ -3490,8 -3469,7 +3493,8 @@@ i915_gem_object_bind_to_vm(struct drm_i
                           uint64_t flags)
  {
        struct drm_device *dev = obj->base.dev;
 -      struct drm_i915_private *dev_priv = dev->dev_private;
 +      struct drm_i915_private *dev_priv = to_i915(dev);
 +      struct i915_ggtt *ggtt = &dev_priv->ggtt;
        u32 fence_alignment, unfenced_alignment;
        u32 search_flag, alloc_flag;
        u64 start, end;
        start = flags & PIN_OFFSET_BIAS ? flags & PIN_OFFSET_MASK : 0;
        end = vm->total;
        if (flags & PIN_MAPPABLE)
 -              end = min_t(u64, end, dev_priv->gtt.mappable_end);
 +              end = min_t(u64, end, ggtt->mappable_end);
        if (flags & PIN_ZONE_4G)
                end = min_t(u64, end, (1ULL << 32) - PAGE_SIZE);
  
@@@ -3745,9 -3723,6 +3748,9 @@@ i915_gem_object_flush_cpu_write_domain(
  int
  i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
  {
 +      struct drm_device *dev = obj->base.dev;
 +      struct drm_i915_private *dev_priv = to_i915(dev);
 +      struct i915_ggtt *ggtt = &dev_priv->ggtt;
        uint32_t old_write_domain, old_read_domains;
        struct i915_vma *vma;
        int ret;
        vma = i915_gem_obj_to_ggtt(obj);
        if (vma && drm_mm_node_allocated(&vma->node) && !obj->active)
                list_move_tail(&vma->vm_link,
 -                             &to_i915(obj->base.dev)->gtt.base.inactive_list);
 +                             &ggtt->base.inactive_list);
  
        return 0;
  }
@@@ -3934,7 -3909,7 +3937,7 @@@ int i915_gem_get_caching_ioctl(struct d
        struct drm_i915_gem_caching *args = data;
        struct drm_i915_gem_object *obj;
  
 -      obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 +      obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
        if (&obj->base == NULL)
                return -ENOENT;
  
@@@ -3977,7 -3952,7 +3980,7 @@@ int i915_gem_set_caching_ioctl(struct d
                 * cacheline, whereas normally such cachelines would get
                 * invalidated.
                 */
 -              if (IS_BXT_REVID(dev, 0, BXT_REVID_A1))
 +              if (!HAS_LLC(dev) && !HAS_SNOOP(dev))
                        return -ENODEV;
  
                level = I915_CACHE_LLC;
        if (ret)
                goto rpm_put;
  
 -      obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 +      obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
@@@ -4156,15 -4131,16 +4159,15 @@@ i915_gem_ring_throttle(struct drm_devic
        struct drm_i915_file_private *file_priv = file->driver_priv;
        unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
        struct drm_i915_gem_request *request, *target = NULL;
 -      unsigned reset_counter;
        int ret;
  
        ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
        if (ret)
                return ret;
  
 -      ret = i915_gem_check_wedge(&dev_priv->gpu_error, false);
 -      if (ret)
 -              return ret;
 +      /* ABI: return -EIO if already wedged */
 +      if (i915_terminally_wedged(&dev_priv->gpu_error))
 +              return -EIO;
  
        spin_lock(&file_priv->mm.lock);
        list_for_each_entry(request, &file_priv->mm.request_list, client_list) {
  
                target = request;
        }
 -      reset_counter = atomic_read(&dev_priv->gpu_error.reset_counter);
        if (target)
                i915_gem_request_reference(target);
        spin_unlock(&file_priv->mm.lock);
        if (target == NULL)
                return 0;
  
 -      ret = __i915_wait_request(target, reset_counter, true, NULL, NULL);
 +      ret = __i915_wait_request(target, true, NULL, NULL);
        if (ret == 0)
                queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
  
@@@ -4237,7 -4214,7 +4240,7 @@@ void __i915_vma_set_map_and_fenceable(s
                     (vma->node.start & (fence_alignment - 1)) == 0);
  
        mappable = (vma->node.start + fence_size <=
 -                  to_i915(obj->base.dev)->gtt.mappable_end);
 +                  to_i915(obj->base.dev)->ggtt.mappable_end);
  
        obj->map_and_fenceable = mappable && fenceable;
  }
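
Several hunks in this region (object_bind_to_vm, set_to_gtt_domain, __i915_vma_set_map_and_fenceable) stop reaching through dev_priv->gtt and instead take a local struct i915_ggtt pointer via &to_i915(dev)->ggtt. The sketch below only echoes that access pattern; apart from the field names visible in the diff (ggtt, base, mappable_end), all _stub types are invented for illustration.

/* Illustrative sketch; only the field names mirror the diff. */
struct addr_space_stub { int dummy; };
struct i915_ggtt_stub {
	struct addr_space_stub base;
	unsigned long mappable_end;
};
struct drm_i915_private_stub { struct i915_ggtt_stub ggtt; };

static unsigned long clamp_to_mappable(struct drm_i915_private_stub *dev_priv,
				       unsigned long end)
{
	struct i915_ggtt_stub *ggtt = &dev_priv->ggtt;

	/* Replaces the old dev_priv->gtt.mappable_end lookup. */
	return end < ggtt->mappable_end ? end : ggtt->mappable_end;
}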
@@@ -4269,6 -4246,9 +4272,6 @@@ i915_gem_object_do_pin(struct drm_i915_
        vma = ggtt_view ? i915_gem_obj_to_ggtt_view(obj, ggtt_view) :
                          i915_gem_obj_to_vma(obj, vm);
  
 -      if (IS_ERR(vma))
 -              return PTR_ERR(vma);
 -
        if (vma) {
                if (WARN_ON(vma->pin_count == DRM_I915_GEM_OBJECT_MAX_PIN_COUNT))
                        return -EBUSY;
@@@ -4331,13 -4311,10 +4334,13 @@@ i915_gem_object_ggtt_pin(struct drm_i91
                         uint32_t alignment,
                         uint64_t flags)
  {
 -      if (WARN_ONCE(!view, "no view specified"))
 -              return -EINVAL;
 +      struct drm_device *dev = obj->base.dev;
 +      struct drm_i915_private *dev_priv = to_i915(dev);
 +      struct i915_ggtt *ggtt = &dev_priv->ggtt;
 +
 +      BUG_ON(!view);
  
 -      return i915_gem_object_do_pin(obj, i915_obj_to_ggtt(obj), view,
 +      return i915_gem_object_do_pin(obj, &ggtt->base, view,
                                      alignment, flags | PIN_GLOBAL);
  }
  
@@@ -4347,6 -4324,7 +4350,6 @@@ i915_gem_object_ggtt_unpin_view(struct 
  {
        struct i915_vma *vma = i915_gem_obj_to_ggtt_view(obj, view);
  
 -      BUG_ON(!vma);
        WARN_ON(vma->pin_count == 0);
        WARN_ON(!i915_gem_obj_ggtt_bound_view(obj, view));
  
@@@ -4365,7 -4343,7 +4368,7 @@@ i915_gem_busy_ioctl(struct drm_device *
        if (ret)
                return ret;
  
 -      obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 +      obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
        if (obj->active) {
                int i;
  
 -              for (i = 0; i < I915_NUM_RINGS; i++) {
 +              for (i = 0; i < I915_NUM_ENGINES; i++) {
                        struct drm_i915_gem_request *req;
  
                        req = obj->last_read_req[i];
                        if (req)
 -                              args->busy |= 1 << (16 + req->ring->exec_id);
 +                              args->busy |= 1 << (16 + req->engine->exec_id);
                }
                if (obj->last_write_req)
 -                      args->busy |= obj->last_write_req->ring->exec_id;
 +                      args->busy |= obj->last_write_req->engine->exec_id;
        }
  
  unref:
@@@ -4430,7 -4408,7 +4433,7 @@@ i915_gem_madvise_ioctl(struct drm_devic
        if (ret)
                return ret;
  
 -      obj = to_intel_bo(drm_gem_object_lookup(dev, file_priv, args->handle));
 +      obj = to_intel_bo(drm_gem_object_lookup(file_priv, args->handle));
        if (&obj->base == NULL) {
                ret = -ENOENT;
                goto unlock;
@@@ -4472,8 -4450,8 +4475,8 @@@ void i915_gem_object_init(struct drm_i9
        int i;
  
        INIT_LIST_HEAD(&obj->global_list);
 -      for (i = 0; i < I915_NUM_RINGS; i++)
 -              INIT_LIST_HEAD(&obj->ring_list[i]);
 +      for (i = 0; i < I915_NUM_ENGINES; i++)
 +              INIT_LIST_HEAD(&obj->engine_list[i]);
        INIT_LIST_HEAD(&obj->obj_exec_link);
        INIT_LIST_HEAD(&obj->vma_list);
        INIT_LIST_HEAD(&obj->batch_pool_link);
@@@ -4648,15 -4626,14 +4651,15 @@@ struct i915_vma *i915_gem_obj_to_vma(st
  struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
                                           const struct i915_ggtt_view *view)
  {
 -      struct i915_address_space *ggtt = i915_obj_to_ggtt(obj);
 +      struct drm_device *dev = obj->base.dev;
 +      struct drm_i915_private *dev_priv = to_i915(dev);
 +      struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct i915_vma *vma;
  
 -      if (WARN_ONCE(!view, "no view specified"))
 -              return ERR_PTR(-EINVAL);
 +      BUG_ON(!view);
  
        list_for_each_entry(vma, &obj->vma_list, obj_link)
 -              if (vma->vm == ggtt &&
 +              if (vma->vm == &ggtt->base &&
                    i915_ggtt_view_equal(&vma->ggtt_view, view))
                        return vma;
        return NULL;
@@@ -4679,13 -4656,14 +4682,13 @@@ void i915_gem_vma_destroy(struct i915_v
  }
  
  static void
 -i915_gem_stop_ringbuffers(struct drm_device *dev)
 +i915_gem_stop_engines(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct intel_engine_cs *ring;
 -      int i;
 +      struct intel_engine_cs *engine;
  
 -      for_each_ring(ring, dev_priv, i)
 -              dev_priv->gt.stop_ring(ring);
 +      for_each_engine(engine, dev_priv)
 +              dev_priv->gt.stop_engine(engine);
  }
  
  int
@@@ -4701,7 -4679,7 +4704,7 @@@ i915_gem_suspend(struct drm_device *dev
  
        i915_gem_retire_requests(dev);
  
 -      i915_gem_stop_ringbuffers(dev);
 +      i915_gem_stop_engines(dev);
        mutex_unlock(&dev->struct_mutex);
  
        cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
@@@ -4722,8 -4700,8 +4725,8 @@@ err
  
  int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
  {
 -      struct intel_engine_cs *ring = req->ring;
 -      struct drm_device *dev = ring->dev;
 +      struct intel_engine_cs *engine = req->engine;
 +      struct drm_device *dev = engine->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
        int i, ret;
         * at initialization time.
         */
        for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
 -              intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(1));
 -              intel_ring_emit_reg(ring, GEN7_L3LOG(slice, i));
 -              intel_ring_emit(ring, remap_info[i]);
 +              intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
 +              intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
 +              intel_ring_emit(engine, remap_info[i]);
        }
  
 -      intel_ring_advance(ring);
 +      intel_ring_advance(engine);
  
        return ret;
  }
@@@ -4803,7 -4781,7 +4806,7 @@@ static void init_unused_rings(struct dr
        }
  }
  
 -int i915_gem_init_rings(struct drm_device *dev)
 +int i915_gem_init_engines(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;
        return 0;
  
  cleanup_vebox_ring:
 -      intel_cleanup_ring_buffer(&dev_priv->ring[VECS]);
 +      intel_cleanup_engine(&dev_priv->engine[VECS]);
  cleanup_blt_ring:
 -      intel_cleanup_ring_buffer(&dev_priv->ring[BCS]);
 +      intel_cleanup_engine(&dev_priv->engine[BCS]);
  cleanup_bsd_ring:
 -      intel_cleanup_ring_buffer(&dev_priv->ring[VCS]);
 +      intel_cleanup_engine(&dev_priv->engine[VCS]);
  cleanup_render_ring:
 -      intel_cleanup_ring_buffer(&dev_priv->ring[RCS]);
 +      intel_cleanup_engine(&dev_priv->engine[RCS]);
  
        return ret;
  }
@@@ -4854,8 -4832,8 +4857,8 @@@ in
  i915_gem_init_hw(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct intel_engine_cs *ring;
 -      int ret, i, j;
 +      struct intel_engine_cs *engine;
 +      int ret, j;
  
        if (INTEL_INFO(dev)->gen < 6 && !intel_enable_gtt())
                return -EIO;
        /* Double layer security blanket, see i915_gem_init() */
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
  
 -      if (dev_priv->ellc_size)
 +      if (HAS_EDRAM(dev) && INTEL_GEN(dev_priv) < 9)
                I915_WRITE(HSW_IDICR, I915_READ(HSW_IDICR) | IDIHASHMSK(0xf));
  
        if (IS_HASWELL(dev))
        }
  
        /* Need to do basic initialisation of all rings first: */
 -      for_each_ring(ring, dev_priv, i) {
 -              ret = ring->init_hw(ring);
 +      for_each_engine(engine, dev_priv) {
 +              ret = engine->init_hw(engine);
                if (ret)
                        goto out;
        }
  
 +      intel_mocs_init_l3cc_table(dev);
 +
        /* We can't enable contexts until all firmware is loaded */
        if (HAS_GUC_UCODE(dev)) {
                ret = intel_guc_ucode_load(dev);
                goto out;
  
        /* Now it is safe to go back round and do everything else: */
 -      for_each_ring(ring, dev_priv, i) {
 +      for_each_engine(engine, dev_priv) {
                struct drm_i915_gem_request *req;
  
 -              req = i915_gem_request_alloc(ring, NULL);
 +              req = i915_gem_request_alloc(engine, NULL);
                if (IS_ERR(req)) {
                        ret = PTR_ERR(req);
 -                      i915_gem_cleanup_ringbuffer(dev);
 -                      goto out;
 +                      break;
                }
  
 -              if (ring->id == RCS) {
 -                      for (j = 0; j < NUM_L3_SLICES(dev); j++)
 -                              i915_gem_l3_remap(req, j);
 +              if (engine->id == RCS) {
 +                      for (j = 0; j < NUM_L3_SLICES(dev); j++) {
 +                              ret = i915_gem_l3_remap(req, j);
 +                              if (ret)
 +                                      goto err_request;
 +                      }
                }
  
                ret = i915_ppgtt_init_ring(req);
 -              if (ret && ret != -EIO) {
 -                      DRM_ERROR("PPGTT enable ring #%d failed %d\n", i, ret);
 -                      i915_gem_request_cancel(req);
 -                      i915_gem_cleanup_ringbuffer(dev);
 -                      goto out;
 -              }
 +              if (ret)
 +                      goto err_request;
  
                ret = i915_gem_context_enable(req);
 -              if (ret && ret != -EIO) {
 -                      DRM_ERROR("Context enable ring #%d failed %d\n", i, ret);
 -                      i915_gem_request_cancel(req);
 -                      i915_gem_cleanup_ringbuffer(dev);
 -                      goto out;
 -              }
 +              if (ret)
 +                      goto err_request;
  
 +err_request:
                i915_add_request_no_flush(req);
 +              if (ret) {
 +                      DRM_ERROR("Failed to enable %s, error=%d\n",
 +                                engine->name, ret);
 +                      i915_gem_cleanup_engines(dev);
 +                      break;
 +              }
        }
  
  out:
@@@ -4980,14 -4955,14 +4983,14 @@@ int i915_gem_init(struct drm_device *de
  
        if (!i915.enable_execlists) {
                dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
 -              dev_priv->gt.init_rings = i915_gem_init_rings;
 -              dev_priv->gt.cleanup_ring = intel_cleanup_ring_buffer;
 -              dev_priv->gt.stop_ring = intel_stop_ring_buffer;
 +              dev_priv->gt.init_engines = i915_gem_init_engines;
 +              dev_priv->gt.cleanup_engine = intel_cleanup_engine;
 +              dev_priv->gt.stop_engine = intel_stop_engine;
        } else {
                dev_priv->gt.execbuf_submit = intel_execlists_submission;
 -              dev_priv->gt.init_rings = intel_logical_rings_init;
 -              dev_priv->gt.cleanup_ring = intel_logical_ring_cleanup;
 -              dev_priv->gt.stop_ring = intel_logical_ring_stop;
 +              dev_priv->gt.init_engines = intel_logical_rings_init;
 +              dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
 +              dev_priv->gt.stop_engine = intel_logical_ring_stop;
        }
  
        /* This is just a security blanket to placate dragons.
        if (ret)
                goto out_unlock;
  
 -      i915_gem_init_global_gtt(dev);
 +      i915_gem_init_ggtt(dev);
  
        ret = i915_gem_context_init(dev);
        if (ret)
                goto out_unlock;
  
 -      ret = dev_priv->gt.init_rings(dev);
 +      ret = dev_priv->gt.init_engines(dev);
        if (ret)
                goto out_unlock;
  
@@@ -5031,52 -5006,29 +5034,52 @@@ out_unlock
  }
  
  void
 -i915_gem_cleanup_ringbuffer(struct drm_device *dev)
 +i915_gem_cleanup_engines(struct drm_device *dev)
  {
        struct drm_i915_private *dev_priv = dev->dev_private;
 -      struct intel_engine_cs *ring;
 -      int i;
 +      struct intel_engine_cs *engine;
  
 -      for_each_ring(ring, dev_priv, i)
 -              dev_priv->gt.cleanup_ring(ring);
 +      for_each_engine(engine, dev_priv)
 +              dev_priv->gt.cleanup_engine(engine);
  
 -    if (i915.enable_execlists)
 -            /*
 -             * Neither the BIOS, ourselves or any other kernel
 -             * expects the system to be in execlists mode on startup,
 -             * so we need to reset the GPU back to legacy mode.
 -             */
 -            intel_gpu_reset(dev);
 +      if (i915.enable_execlists)
 +              /*
 +               * Neither the BIOS, ourselves or any other kernel
 +               * expects the system to be in execlists mode on startup,
 +               * so we need to reset the GPU back to legacy mode.
 +               */
 +              intel_gpu_reset(dev, ALL_ENGINES);
  }
  
  static void
 -init_ring_lists(struct intel_engine_cs *ring)
 +init_engine_lists(struct intel_engine_cs *engine)
  {
 -      INIT_LIST_HEAD(&ring->active_list);
 -      INIT_LIST_HEAD(&ring->request_list);
 +      INIT_LIST_HEAD(&engine->active_list);
 +      INIT_LIST_HEAD(&engine->request_list);
 +}
 +
 +void
 +i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
 +{
 +      struct drm_device *dev = dev_priv->dev;
 +
 +      if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
 +          !IS_CHERRYVIEW(dev_priv))
 +              dev_priv->num_fence_regs = 32;
 +      else if (INTEL_INFO(dev_priv)->gen >= 4 || IS_I945G(dev_priv) ||
 +               IS_I945GM(dev_priv) || IS_G33(dev_priv))
 +              dev_priv->num_fence_regs = 16;
 +      else
 +              dev_priv->num_fence_regs = 8;
 +
 +      if (intel_vgpu_active(dev))
 +              dev_priv->num_fence_regs =
 +                              I915_READ(vgtif_reg(avail_rs.fence_num));
 +
 +      /* Initialize fence registers to zero */
 +      i915_gem_restore_fences(dev);
 +
 +      i915_gem_detect_bit_6_swizzle(dev);
  }
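
The new i915_gem_load_init_fences() keeps the fence-register sizing rules moved out of i915_gem_load_init(): 32 slots on gen7+ except Valleyview/Cherryview, 16 on gen4+ or on 945G/945GM/G33, 8 otherwise, with a vGPU override read from the host. A compact restatement of just that selection logic, with the hardware predicates reduced to plain stub parameters (the function name and parameters are invented for illustration):

/* Illustrative sketch of the fence-count selection; inputs are stub flags. */
static int num_fence_regs_sketch(int gen, int is_vlv_or_chv, int is_945_or_g33,
				 int vgpu_active, int vgpu_fence_num)
{
	int n;

	if (gen >= 7 && !is_vlv_or_chv)
		n = 32;
	else if (gen >= 4 || is_945_or_g33)
		n = 16;
	else
		n = 8;

	if (vgpu_active)
		n = vgpu_fence_num;	/* host-provided limit under vGPU */

	return n;
}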
  
  void
@@@ -5106,8 -5058,8 +5109,8 @@@ i915_gem_load_init(struct drm_device *d
        INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
        INIT_LIST_HEAD(&dev_priv->mm.bound_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 -      for (i = 0; i < I915_NUM_RINGS; i++)
 -              init_ring_lists(&dev_priv->ring[i]);
 +      for (i = 0; i < I915_NUM_ENGINES; i++)
 +              init_engine_lists(&dev_priv->engine[i]);
        for (i = 0; i < I915_MAX_NUM_FENCES; i++)
                INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
        INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
  
        dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
  
 -      if (INTEL_INFO(dev)->gen >= 7 && !IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev))
 -              dev_priv->num_fence_regs = 32;
 -      else if (INTEL_INFO(dev)->gen >= 4 || IS_I945G(dev) || IS_I945GM(dev) || IS_G33(dev))
 -              dev_priv->num_fence_regs = 16;
 -      else
 -              dev_priv->num_fence_regs = 8;
 -
 -      if (intel_vgpu_active(dev))
 -              dev_priv->num_fence_regs =
 -                              I915_READ(vgtif_reg(avail_rs.fence_num));
 -
        /*
         * Set initial sequence number for requests.
         * Using this number allows the wraparound to happen early,
        dev_priv->next_seqno = ((u32)~0 - 0x1100);
        dev_priv->last_seqno = ((u32)~0 - 0x1101);
  
 -      /* Initialize fence registers to zero */
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 -      i915_gem_restore_fences(dev);
  
 -      i915_gem_detect_bit_6_swizzle(dev);
        init_waitqueue_head(&dev_priv->pending_flip_queue);
  
        dev_priv->mm.interruptible = true;
@@@ -5250,12 -5216,11 +5253,12 @@@ u64 i915_gem_obj_offset(struct drm_i915
  u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
                                  const struct i915_ggtt_view *view)
  {
 -      struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
 +      struct drm_i915_private *dev_priv = to_i915(o->base.dev);
 +      struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct i915_vma *vma;
  
        list_for_each_entry(vma, &o->vma_list, obj_link)
 -              if (vma->vm == ggtt &&
 +              if (vma->vm == &ggtt->base &&
                    i915_ggtt_view_equal(&vma->ggtt_view, view))
                        return vma->node.start;
  
@@@ -5282,12 -5247,11 +5285,12 @@@ bool i915_gem_obj_bound(struct drm_i915
  bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
                                  const struct i915_ggtt_view *view)
  {
 -      struct i915_address_space *ggtt = i915_obj_to_ggtt(o);
 +      struct drm_i915_private *dev_priv = to_i915(o->base.dev);
 +      struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct i915_vma *vma;
  
        list_for_each_entry(vma, &o->vma_list, obj_link)
 -              if (vma->vm == ggtt &&
 +              if (vma->vm == &ggtt->base &&
                    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
                    drm_mm_node_allocated(&vma->node))
                        return true;