drm/i915: Simplify request_alloc by returning the allocated request
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index aad26851cee3c87514fc9f4162a7bff2ac88ec75..b6c4ff63725ff52b0061c0c1dcd0ab2578a4505f 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
 #include <drm/drm_vma_manager.h>
 #include <drm/i915_drm.h>
 #include "i915_drv.h"
+#include "i915_gem_dmabuf.h"
 #include "i915_vgpu.h"
 #include "i915_trace.h"
 #include "intel_drv.h"
 #include "intel_mocs.h"
+#include <linux/reservation.h>
 #include <linux/shmem_fs.h>
 #include <linux/slab.h>
 #include <linux/swap.h>
@@ -44,7 +46,7 @@ static void i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *o
 static void
 i915_gem_object_retire__write(struct drm_i915_gem_object *obj);
 static void
-i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring);
+i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int engine);
 
 static bool cpu_cache_is_coherent(struct drm_device *dev,
                                  enum i915_cache_level level)
@@ -54,12 +56,33 @@ static bool cpu_cache_is_coherent(struct drm_device *dev,
 
 static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
 {
+       if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+               return false;
+
        if (!cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
                return true;
 
        return obj->pin_display;
 }
 
+static int
+insert_mappable_node(struct drm_i915_private *i915,
+                     struct drm_mm_node *node, u32 size)
+{
+       memset(node, 0, sizeof(*node));
+       return drm_mm_insert_node_in_range_generic(&i915->ggtt.base.mm, node,
+                                                  size, 0, 0, 0,
+                                                  i915->ggtt.mappable_end,
+                                                  DRM_MM_SEARCH_DEFAULT,
+                                                  DRM_MM_CREATE_DEFAULT);
+}
+
+static void
+remove_mappable_node(struct drm_mm_node *node)
+{
+       drm_mm_remove_node(node);
+}
+
 /* some bookkeeping */
 static void i915_gem_info_add_obj(struct drm_i915_private *dev_priv,
                                  size_t size)
@@ -107,7 +130,7 @@ i915_gem_wait_for_error(struct i915_gpu_error *error)
 
 int i915_mutex_lock_interruptible(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
        ret = i915_gem_wait_for_error(&dev_priv->gpu_error);
@@ -177,7 +200,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
                vaddr += PAGE_SIZE;
        }
 
-       i915_gem_chipset_flush(obj->base.dev);
+       i915_gem_chipset_flush(to_i915(obj->base.dev));
 
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
@@ -266,13 +289,13 @@ drop_pages(struct drm_i915_gem_object *obj)
        struct i915_vma *vma, *next;
        int ret;
 
-       drm_gem_object_reference(&obj->base);
+       i915_gem_object_get(obj);
        list_for_each_entry_safe(vma, next, &obj->vma_list, obj_link)
                if (i915_vma_unbind(vma))
                        break;
 
        ret = i915_gem_object_put_pages(obj);
-       drm_gem_object_unreference(&obj->base);
+       i915_gem_object_put(obj);
 
        return ret;
 }
@@ -347,7 +370,7 @@ i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
        }
 
        drm_clflush_virt_range(vaddr, args->size);
-       i915_gem_chipset_flush(dev);
+       i915_gem_chipset_flush(to_i915(dev));
 
 out:
        intel_fb_obj_flush(obj, false, ORIGIN_CPU);
@@ -356,13 +379,13 @@ out:
 
 void *i915_gem_object_alloc(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        return kmem_cache_zalloc(dev_priv->objects, GFP_KERNEL);
 }
 
 void i915_gem_object_free(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        kmem_cache_free(dev_priv->objects, obj);
 }
 
@@ -381,13 +404,13 @@ i915_gem_create(struct drm_file *file,
                return -EINVAL;
 
        /* Allocate the new object */
-       obj = i915_gem_alloc_object(dev, size);
-       if (obj == NULL)
-               return -ENOMEM;
+       obj = i915_gem_object_create(dev, size);
+       if (IS_ERR(obj))
+               return PTR_ERR(obj);
 
        ret = drm_gem_handle_create(file, &obj->base, &handle);
        /* drop reference from allocate - handle holds it now */
-       drm_gem_object_unreference_unlocked(&obj->base);
+       i915_gem_object_put_unlocked(obj);
        if (ret)
                return ret;
 
@@ -409,6 +432,9 @@ i915_gem_dumb_create(struct drm_file *file,
 
 /**
  * Creates a new mm object and returns a handle to it.
+ * @dev: drm device pointer
+ * @data: ioctl data blob
+ * @file: drm file pointer
  */
 int
 i915_gem_create_ioctl(struct drm_device *dev, void *data,
@@ -484,9 +510,13 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
 
        *needs_clflush = 0;
 
-       if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
+       if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
                return -EINVAL;
 
+       ret = i915_gem_object_wait_rendering(obj, true);
+       if (ret)
+               return ret;
+
        if (!(obj->base.read_domains & I915_GEM_DOMAIN_CPU)) {
                /* If we're not in the cpu read domain, set ourself into the gtt
                 * read domain and manually flush cachelines (if required). This
@@ -494,9 +524,6 @@ int i915_gem_obj_prepare_shmem_read(struct drm_i915_gem_object *obj,
                 * anyway again before the next pread happens. */
                *needs_clflush = !cpu_cache_is_coherent(obj->base.dev,
                                                        obj->cache_level);
-               ret = i915_gem_object_wait_rendering(obj, true);
-               if (ret)
-                       return ret;
        }
 
        ret = i915_gem_object_get_pages(obj);
@@ -585,6 +612,142 @@ shmem_pread_slow(struct page *page, int shmem_page_offset, int page_length,
        return ret ? - EFAULT : 0;
 }
 
+static inline unsigned long
+slow_user_access(struct io_mapping *mapping,
+                uint64_t page_base, int page_offset,
+                char __user *user_data,
+                unsigned long length, bool pwrite)
+{
+       void __iomem *ioaddr;
+       void *vaddr;
+       uint64_t unwritten;
+
+       ioaddr = io_mapping_map_wc(mapping, page_base, PAGE_SIZE);
+       /* We can use the CPU memcpy functions here because this is x86. */
+       vaddr = (void __force *)ioaddr + page_offset;
+       if (pwrite)
+               unwritten = __copy_from_user(vaddr, user_data, length);
+       else
+               unwritten = __copy_to_user(user_data, vaddr, length);
+
+       io_mapping_unmap(ioaddr);
+       return unwritten;
+}
+
+static int
+i915_gem_gtt_pread(struct drm_device *dev,
+                  struct drm_i915_gem_object *obj, uint64_t size,
+                  uint64_t data_offset, uint64_t data_ptr)
+{
+       struct drm_i915_private *dev_priv = to_i915(dev);
+       struct i915_ggtt *ggtt = &dev_priv->ggtt;
+       struct drm_mm_node node;
+       char __user *user_data;
+       uint64_t remain;
+       uint64_t offset;
+       int ret;
+
+       ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE);
+       if (ret) {
+               ret = insert_mappable_node(dev_priv, &node, PAGE_SIZE);
+               if (ret)
+                       goto out;
+
+               ret = i915_gem_object_get_pages(obj);
+               if (ret) {
+                       remove_mappable_node(&node);
+                       goto out;
+               }
+
+               i915_gem_object_pin_pages(obj);
+       } else {
+               node.start = i915_gem_obj_ggtt_offset(obj);
+               node.allocated = false;
+               ret = i915_gem_object_put_fence(obj);
+               if (ret)
+                       goto out_unpin;
+       }
+
+       ret = i915_gem_object_set_to_gtt_domain(obj, false);
+       if (ret)
+               goto out_unpin;
+
+       user_data = u64_to_user_ptr(data_ptr);
+       remain = size;
+       offset = data_offset;
+
+       mutex_unlock(&dev->struct_mutex);
+       if (likely(!i915.prefault_disable)) {
+               ret = fault_in_multipages_writeable(user_data, remain);
+               if (ret) {
+                       mutex_lock(&dev->struct_mutex);
+                       goto out_unpin;
+               }
+       }
+
+       while (remain > 0) {
+               /* Operation in this page
+                *
+                * page_base = page offset within aperture
+                * page_offset = offset within page
+                * page_length = bytes to copy for this page
+                */
+               u32 page_base = node.start;
+               unsigned page_offset = offset_in_page(offset);
+               unsigned page_length = PAGE_SIZE - page_offset;
+               page_length = remain < page_length ? remain : page_length;
+               if (node.allocated) {
+                       wmb();
+                       ggtt->base.insert_page(&ggtt->base,
+                                              i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+                                              node.start,
+                                              I915_CACHE_NONE, 0);
+                       wmb();
+               } else {
+                       page_base += offset & PAGE_MASK;
+               }
+               /* This is a slow read/write as it tries to read from
+                * and write to user memory, which may result in page
+                * faults, and so we cannot perform this under struct_mutex.
+                */
+               if (slow_user_access(ggtt->mappable, page_base,
+                                    page_offset, user_data,
+                                    page_length, false)) {
+                       ret = -EFAULT;
+                       break;
+               }
+
+               remain -= page_length;
+               user_data += page_length;
+               offset += page_length;
+       }
+
+       mutex_lock(&dev->struct_mutex);
+       if (ret == 0 && (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
+               /* The user has modified the object whilst we tried
+                * reading from it, and we now have no idea what domain
+                * the pages should be in. As we have just been touching
+                * them directly, flush everything back to the GTT
+                * domain.
+                */
+               ret = i915_gem_object_set_to_gtt_domain(obj, false);
+       }
+
+out_unpin:
+       if (node.allocated) {
+               wmb();
+               ggtt->base.clear_range(&ggtt->base,
+                                      node.start, node.size,
+                                      true);
+               i915_gem_object_unpin_pages(obj);
+               remove_mappable_node(&node);
+       } else {
+               i915_gem_object_ggtt_unpin(obj);
+       }
+out:
+       return ret;
+}
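/*
 * Illustrative sketch, not part of the patch above: the per-page chunking
 * used by the GTT pread/pwrite loops.  A linear (offset, size) range is
 * walked one page at a time; page_offset is the offset within the current
 * page and page_length is clamped so that a chunk never crosses a page
 * boundary.  PAGE_SIZE is hard-coded to 4096 purely for the demonstration.
 */
#include <stdio.h>

#define PAGE_SIZE 4096ULL

static void walk_pages(unsigned long long offset, unsigned long long size)
{
	unsigned long long remain = size;

	while (remain > 0) {
		unsigned long long page_offset = offset & (PAGE_SIZE - 1);
		unsigned long long page_length = PAGE_SIZE - page_offset;

		if (remain < page_length)
			page_length = remain;

		/* page_base is the page-aligned start of this chunk */
		printf("page_base=%llu page_offset=%llu page_length=%llu\n",
		       offset & ~(PAGE_SIZE - 1), page_offset, page_length);

		remain -= page_length;
		offset += page_length;
	}
}

int main(void)
{
	walk_pages(4000, 10000);	/* a copy spanning four pages */
	return 0;
}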
+
 static int
 i915_gem_shmem_pread(struct drm_device *dev,
                     struct drm_i915_gem_object *obj,
@@ -600,6 +763,9 @@ i915_gem_shmem_pread(struct drm_device *dev,
        int needs_clflush = 0;
        struct sg_page_iter sg_iter;
 
+       if (!i915_gem_object_has_struct_page(obj))
+               return -ENODEV;
+
        user_data = u64_to_user_ptr(args->data_ptr);
        remain = args->size;
 
@@ -672,6 +838,9 @@ out:
 
 /**
  * Reads data from the object referenced by handle.
+ * @dev: drm device pointer
+ * @data: ioctl data blob
+ * @file: drm file pointer
  *
  * On error, the contents of *data are undefined.
  */
@@ -695,8 +864,8 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
        if (ret)
                return ret;
 
-       obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
-       if (&obj->base == NULL) {
+       obj = i915_gem_object_lookup(file, args->handle);
+       if (!obj) {
                ret = -ENOENT;
                goto unlock;
        }
@@ -708,20 +877,17 @@ i915_gem_pread_ioctl(struct drm_device *dev, void *data,
                goto out;
        }
 
-       /* prime objects have no backing filp to GEM pread/pwrite
-        * pages from.
-        */
-       if (!obj->base.filp) {
-               ret = -EINVAL;
-               goto out;
-       }
-
        trace_i915_gem_object_pread(obj, args->offset, args->size);
 
        ret = i915_gem_shmem_pread(dev, obj, args, file);
 
+       /* pread for non-shmem-backed objects */
+       if (ret == -EFAULT || ret == -ENODEV)
+               ret = i915_gem_gtt_pread(dev, obj, args->size,
+                                       args->offset, args->data_ptr);
+
 out:
-       drm_gem_object_unreference(&obj->base);
+       i915_gem_object_put(obj);
 unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
@@ -753,60 +919,99 @@ fast_user_write(struct io_mapping *mapping,
 /**
  * This is the fast pwrite path, where we copy the data directly from the
  * user into the GTT, uncached.
+ * @i915: i915 device private data
+ * @obj: i915 gem object
+ * @args: pwrite arguments structure
+ * @file: drm file pointer
  */
 static int
-i915_gem_gtt_pwrite_fast(struct drm_device *dev,
+i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
                         struct drm_i915_gem_object *obj,
                         struct drm_i915_gem_pwrite *args,
                         struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
-       ssize_t remain;
-       loff_t offset, page_base;
+       struct i915_ggtt *ggtt = &i915->ggtt;
+       struct drm_device *dev = obj->base.dev;
+       struct drm_mm_node node;
+       uint64_t remain, offset;
        char __user *user_data;
-       int page_offset, page_length, ret;
+       int ret;
+       bool hit_slow_path = false;
+
+       if (obj->tiling_mode != I915_TILING_NONE)
+               return -EFAULT;
 
        ret = i915_gem_obj_ggtt_pin(obj, 0, PIN_MAPPABLE | PIN_NONBLOCK);
-       if (ret)
-               goto out;
+       if (ret) {
+               ret = insert_mappable_node(i915, &node, PAGE_SIZE);
+               if (ret)
+                       goto out;
+
+               ret = i915_gem_object_get_pages(obj);
+               if (ret) {
+                       remove_mappable_node(&node);
+                       goto out;
+               }
+
+               i915_gem_object_pin_pages(obj);
+       } else {
+               node.start = i915_gem_obj_ggtt_offset(obj);
+               node.allocated = false;
+               ret = i915_gem_object_put_fence(obj);
+               if (ret)
+                       goto out_unpin;
+       }
 
        ret = i915_gem_object_set_to_gtt_domain(obj, true);
        if (ret)
                goto out_unpin;
 
-       ret = i915_gem_object_put_fence(obj);
-       if (ret)
-               goto out_unpin;
+       intel_fb_obj_invalidate(obj, ORIGIN_GTT);
+       obj->dirty = true;
 
        user_data = u64_to_user_ptr(args->data_ptr);
+       offset = args->offset;
        remain = args->size;
-
-       offset = i915_gem_obj_ggtt_offset(obj) + args->offset;
-
-       intel_fb_obj_invalidate(obj, ORIGIN_GTT);
-
-       while (remain > 0) {
+       while (remain) {
                /* Operation in this page
                 *
                 * page_base = page offset within aperture
                 * page_offset = offset within page
                 * page_length = bytes to copy for this page
                 */
-               page_base = offset & PAGE_MASK;
-               page_offset = offset_in_page(offset);
-               page_length = remain;
-               if ((page_offset + remain) > PAGE_SIZE)
-                       page_length = PAGE_SIZE - page_offset;
-
+               u32 page_base = node.start;
+               unsigned page_offset = offset_in_page(offset);
+               unsigned page_length = PAGE_SIZE - page_offset;
+               page_length = remain < page_length ? remain : page_length;
+               if (node.allocated) {
+                       wmb(); /* flush the write before we modify the GGTT */
+                       ggtt->base.insert_page(&ggtt->base,
+                                              i915_gem_object_get_dma_address(obj, offset >> PAGE_SHIFT),
+                                              node.start, I915_CACHE_NONE, 0);
+                       wmb(); /* flush modifications to the GGTT (insert_page) */
+               } else {
+                       page_base += offset & PAGE_MASK;
+               }
                /* If we get a fault while copying data, then (presumably) our
                 * source page isn't available.  Return the error and we'll
                 * retry in the slow path.
+                * If the object is not shmem backed, we retry with the
+                * path that handles page faults.
                 */
                if (fast_user_write(ggtt->mappable, page_base,
                                    page_offset, user_data, page_length)) {
-                       ret = -EFAULT;
-                       goto out_flush;
+                       hit_slow_path = true;
+                       mutex_unlock(&dev->struct_mutex);
+                       if (slow_user_access(ggtt->mappable,
+                                            page_base,
+                                            page_offset, user_data,
+                                            page_length, true)) {
+                               ret = -EFAULT;
+                               mutex_lock(&dev->struct_mutex);
+                               goto out_flush;
+                       }
+
+                       mutex_lock(&dev->struct_mutex);
                }
 
                remain -= page_length;
@@ -815,9 +1020,31 @@ i915_gem_gtt_pwrite_fast(struct drm_device *dev,
        }
 
 out_flush:
+       if (hit_slow_path) {
+               if (ret == 0 &&
+                   (obj->base.read_domains & I915_GEM_DOMAIN_GTT) == 0) {
+                       /* The user has modified the object whilst we tried
+                        * reading from it, and we now have no idea what domain
+                        * the pages should be in. As we have just been touching
+                        * them directly, flush everything back to the GTT
+                        * domain.
+                        */
+                       ret = i915_gem_object_set_to_gtt_domain(obj, false);
+               }
+       }
+
        intel_fb_obj_flush(obj, false, ORIGIN_GTT);
 out_unpin:
-       i915_gem_object_ggtt_unpin(obj);
+       if (node.allocated) {
+               wmb();
+               ggtt->base.clear_range(&ggtt->base,
+                                      node.start, node.size,
+                                      true);
+               i915_gem_object_unpin_pages(obj);
+               remove_mappable_node(&node);
+       } else {
+               i915_gem_object_ggtt_unpin(obj);
+       }
 out:
        return ret;
 }
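/*
 * Illustrative sketch, not part of the patch above: the "fast path under the
 * lock, slow path outside it" retry pattern that i915_gem_gtt_pwrite_fast()
 * now follows.  fast_copy() stands in for the non-faulting fast_user_write()
 * and slow_copy() for the faulting slow_user_access(); both helpers and the
 * pthread mutex are inventions of this sketch.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Pretend the fast, non-blocking copy fails when the source is not resident. */
static bool fast_copy(char *dst, const char *src, size_t len, bool resident)
{
	if (!resident)
		return false;
	memcpy(dst, src, len);
	return true;
}

/* The slow copy may block (fault pages in), so it must run unlocked. */
static void slow_copy(char *dst, const char *src, size_t len)
{
	memcpy(dst, src, len);
}

static void write_chunk(char *dst, const char *src, size_t len, bool resident)
{
	pthread_mutex_lock(&lock);
	if (!fast_copy(dst, src, len, resident)) {
		/* Drop the lock around the potentially blocking copy... */
		pthread_mutex_unlock(&lock);
		slow_copy(dst, src, len);
		/* ...and retake it before carrying on with the next chunk. */
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	char dst[16] = { 0 };

	write_chunk(dst, "hello", 6, false);
	printf("%s\n", dst);
	return 0;
}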
@@ -908,15 +1135,16 @@ i915_gem_shmem_pwrite(struct drm_device *dev,
 
        obj_do_bit17_swizzling = i915_gem_object_needs_bit17_swizzle(obj);
 
+       ret = i915_gem_object_wait_rendering(obj, false);
+       if (ret)
+               return ret;
+
        if (obj->base.write_domain != I915_GEM_DOMAIN_CPU) {
                /* If we're not in the cpu write domain, set ourself into the gtt
                 * write domain and manually flush cachelines (if required). This
                 * optimizes for the case when the gpu will use the data
                 * right away and we therefore have to clflush anyway. */
                needs_clflush_after = cpu_write_needs_clflush(obj);
-               ret = i915_gem_object_wait_rendering(obj, false);
-               if (ret)
-                       return ret;
        }
        /* Same trick applies to invalidate partially written cachelines read
         * before writing. */
@@ -1006,7 +1234,7 @@ out:
        }
 
        if (needs_clflush_after)
-               i915_gem_chipset_flush(dev);
+               i915_gem_chipset_flush(to_i915(dev));
        else
                obj->cache_dirty = true;
 
@@ -1016,6 +1244,9 @@ out:
 
 /**
  * Writes data to the object referenced by handle.
+ * @dev: drm device
+ * @data: ioctl data blob
+ * @file: drm file
  *
  * On error, the contents of the buffer that were to be modified are undefined.
  */
@@ -1023,7 +1254,7 @@ int
 i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_pwrite *args = data;
        struct drm_i915_gem_object *obj;
        int ret;
@@ -1049,8 +1280,8 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
        if (ret)
                goto put_rpm;
 
-       obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
-       if (&obj->base == NULL) {
+       obj = i915_gem_object_lookup(file, args->handle);
+       if (!obj) {
                ret = -ENOENT;
                goto unlock;
        }
@@ -1062,14 +1293,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
                goto out;
        }
 
-       /* prime objects have no backing filp to GEM pread/pwrite
-        * pages from.
-        */
-       if (!obj->base.filp) {
-               ret = -EINVAL;
-               goto out;
-       }
-
        trace_i915_gem_object_pwrite(obj, args->offset, args->size);
 
        ret = -EFAULT;
@@ -1079,10 +1302,9 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
         * pread/pwrite currently are reading and writing from the CPU
         * perspective, requiring manual detiling by the client.
         */
-       if (obj->tiling_mode == I915_TILING_NONE &&
-           obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
+       if (!i915_gem_object_has_struct_page(obj) ||
            cpu_write_needs_clflush(obj)) {
-               ret = i915_gem_gtt_pwrite_fast(dev, obj, args, file);
+               ret = i915_gem_gtt_pwrite_fast(dev_priv, obj, args, file);
                /* Note that the gtt paths might fail with non-page-backed user
                 * pointers (e.g. gtt mappings when moving data between
                 * textures). Fallback to the shmem path in that case. */
@@ -1091,12 +1313,14 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
        if (ret == -EFAULT || ret == -ENOSPC) {
                if (obj->phys_handle)
                        ret = i915_gem_phys_pwrite(obj, args, file);
-               else
+               else if (i915_gem_object_has_struct_page(obj))
                        ret = i915_gem_shmem_pwrite(dev, obj, args, file);
+               else
+                       ret = -ENODEV;
        }
 
 out:
-       drm_gem_object_unreference(&obj->base);
+       i915_gem_object_put(obj);
 unlock:
        mutex_unlock(&dev->struct_mutex);
 put_rpm:
@@ -1105,377 +1329,19 @@ put_rpm:
        return ret;
 }
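/*
 * Illustrative sketch, not part of the patch above: what the pread/pwrite
 * ioctls handled in this file look like from userspace.  The device node
 * path, the uapi include (<drm/i915_drm.h>) and the minimal error handling
 * are assumptions of this sketch, not anything the patch prescribes.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#include <drm/i915_drm.h>

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);
	if (fd < 0)
		return 1;

	/* Create a 4 KiB GEM object and get a handle to it. */
	struct drm_i915_gem_create create = { .size = 4096 };
	if (ioctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create))
		return 1;

	/* Write some bytes into the object... */
	char out[32] = "written via pwrite";
	struct drm_i915_gem_pwrite pwrite = {
		.handle = create.handle,
		.offset = 0,
		.size = sizeof(out),
		.data_ptr = (uintptr_t)out,
	};
	if (ioctl(fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite))
		return 1;

	/* ...and read them back through the pread path. */
	char in[32] = "";
	struct drm_i915_gem_pread pread = {
		.handle = create.handle,
		.offset = 0,
		.size = sizeof(in),
		.data_ptr = (uintptr_t)in,
	};
	if (ioctl(fd, DRM_IOCTL_I915_GEM_PREAD, &pread))
		return 1;

	printf("%s\n", in);

	struct drm_gem_close close_bo = { .handle = create.handle };
	ioctl(fd, DRM_IOCTL_GEM_CLOSE, &close_bo);
	close(fd);
	return 0;
}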
 
-static int
-i915_gem_check_wedge(unsigned reset_counter, bool interruptible)
-{
-       if (__i915_terminally_wedged(reset_counter))
-               return -EIO;
-
-       if (__i915_reset_in_progress(reset_counter)) {
-               /* Non-interruptible callers can't handle -EAGAIN, hence return
-                * -EIO unconditionally for these. */
-               if (!interruptible)
-                       return -EIO;
-
-               return -EAGAIN;
-       }
-
-       return 0;
-}
-
-static void fake_irq(unsigned long data)
-{
-       wake_up_process((struct task_struct *)data);
-}
-
-static bool missed_irq(struct drm_i915_private *dev_priv,
-                      struct intel_engine_cs *engine)
-{
-       return test_bit(engine->id, &dev_priv->gpu_error.missed_irq_rings);
-}
-
-static unsigned long local_clock_us(unsigned *cpu)
-{
-       unsigned long t;
-
-       /* Cheaply and approximately convert from nanoseconds to microseconds.
-        * The result and subsequent calculations are also defined in the same
-        * approximate microseconds units. The principal source of timing
-        * error here is from the simple truncation.
-        *
-        * Note that local_clock() is only defined wrt to the current CPU;
-        * the comparisons are no longer valid if we switch CPUs. Instead of
-        * blocking preemption for the entire busywait, we can detect the CPU
-        * switch and use that as indicator of system load and a reason to
-        * stop busywaiting, see busywait_stop().
-        */
-       *cpu = get_cpu();
-       t = local_clock() >> 10;
-       put_cpu();
-
-       return t;
-}
-
-static bool busywait_stop(unsigned long timeout, unsigned cpu)
-{
-       unsigned this_cpu;
-
-       if (time_after(local_clock_us(&this_cpu), timeout))
-               return true;
-
-       return this_cpu != cpu;
-}
-
-static int __i915_spin_request(struct drm_i915_gem_request *req, int state)
-{
-       unsigned long timeout;
-       unsigned cpu;
-
-       /* When waiting for high frequency requests, e.g. during synchronous
-        * rendering split between the CPU and GPU, the finite amount of time
-        * required to set up the irq and wait upon it limits the response
-        * rate. By busywaiting on the request completion for a short while we
-        * can service the high frequency waits as quick as possible. However,
-        * if it is a slow request, we want to sleep as quickly as possible.
-        * The tradeoff between waiting and sleeping is roughly the time it
-        * takes to sleep on a request, on the order of a microsecond.
-        */
-
-       if (req->engine->irq_refcount)
-               return -EBUSY;
-
-       /* Only spin if we know the GPU is processing this request */
-       if (!i915_gem_request_started(req, true))
-               return -EAGAIN;
-
-       timeout = local_clock_us(&cpu) + 5;
-       while (!need_resched()) {
-               if (i915_gem_request_completed(req, true))
-                       return 0;
-
-               if (signal_pending_state(state, current))
-                       break;
-
-               if (busywait_stop(timeout, cpu))
-                       break;
-
-               cpu_relax_lowlatency();
-       }
-
-       if (i915_gem_request_completed(req, false))
-               return 0;
-
-       return -EAGAIN;
-}
-
-/**
- * __i915_wait_request - wait until execution of request has finished
- * @req: duh!
- * @interruptible: do an interruptible wait (normally yes)
- * @timeout: in - how long to wait (NULL forever); out - how much time remaining
- *
- * Note: It is of utmost importance that the passed in seqno and reset_counter
- * values have been read by the caller in an smp safe manner. Where read-side
- * locks are involved, it is sufficient to read the reset_counter before
- * unlocking the lock that protects the seqno. For lockless tricks, the
- * reset_counter _must_ be read before, and an appropriate smp_rmb must be
- * inserted.
- *
- * Returns 0 if the request was found within the alloted time. Else returns the
- * errno with remaining time filled in timeout argument.
- */
-int __i915_wait_request(struct drm_i915_gem_request *req,
-                       bool interruptible,
-                       s64 *timeout,
-                       struct intel_rps_client *rps)
-{
-       struct intel_engine_cs *engine = i915_gem_request_get_engine(req);
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       const bool irq_test_in_progress =
-               ACCESS_ONCE(dev_priv->gpu_error.test_irq_rings) & intel_engine_flag(engine);
-       int state = interruptible ? TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
-       DEFINE_WAIT(wait);
-       unsigned long timeout_expire;
-       s64 before = 0; /* Only to silence a compiler warning. */
-       int ret;
-
-       WARN(!intel_irqs_enabled(dev_priv), "IRQs disabled");
-
-       if (list_empty(&req->list))
-               return 0;
-
-       if (i915_gem_request_completed(req, true))
-               return 0;
-
-       timeout_expire = 0;
-       if (timeout) {
-               if (WARN_ON(*timeout < 0))
-                       return -EINVAL;
-
-               if (*timeout == 0)
-                       return -ETIME;
-
-               timeout_expire = jiffies + nsecs_to_jiffies_timeout(*timeout);
-
-               /*
-                * Record current time in case interrupted by signal, or wedged.
-                */
-               before = ktime_get_raw_ns();
-       }
-
-       if (INTEL_INFO(dev_priv)->gen >= 6)
-               gen6_rps_boost(dev_priv, rps, req->emitted_jiffies);
-
-       trace_i915_gem_request_wait_begin(req);
-
-       /* Optimistic spin for the next jiffie before touching IRQs */
-       ret = __i915_spin_request(req, state);
-       if (ret == 0)
-               goto out;
-
-       if (!irq_test_in_progress && WARN_ON(!engine->irq_get(engine))) {
-               ret = -ENODEV;
-               goto out;
-       }
-
-       for (;;) {
-               struct timer_list timer;
-
-               prepare_to_wait(&engine->irq_queue, &wait, state);
-
-               /* We need to check whether any gpu reset happened in between
-                * the request being submitted and now. If a reset has occurred,
-                * the request is effectively complete (we either are in the
-                * process of or have discarded the rendering and completely
-                * reset the GPU. The results of the request are lost and we
-                * are free to continue on with the original operation.
-                */
-               if (req->reset_counter != i915_reset_counter(&dev_priv->gpu_error)) {
-                       ret = 0;
-                       break;
-               }
-
-               if (i915_gem_request_completed(req, false)) {
-                       ret = 0;
-                       break;
-               }
-
-               if (signal_pending_state(state, current)) {
-                       ret = -ERESTARTSYS;
-                       break;
-               }
-
-               if (timeout && time_after_eq(jiffies, timeout_expire)) {
-                       ret = -ETIME;
-                       break;
-               }
-
-               timer.function = NULL;
-               if (timeout || missed_irq(dev_priv, engine)) {
-                       unsigned long expire;
-
-                       setup_timer_on_stack(&timer, fake_irq, (unsigned long)current);
-                       expire = missed_irq(dev_priv, engine) ? jiffies + 1 : timeout_expire;
-                       mod_timer(&timer, expire);
-               }
-
-               io_schedule();
-
-               if (timer.function) {
-                       del_singleshot_timer_sync(&timer);
-                       destroy_timer_on_stack(&timer);
-               }
-       }
-       if (!irq_test_in_progress)
-               engine->irq_put(engine);
-
-       finish_wait(&engine->irq_queue, &wait);
-
-out:
-       trace_i915_gem_request_wait_end(req);
-
-       if (timeout) {
-               s64 tres = *timeout - (ktime_get_raw_ns() - before);
-
-               *timeout = tres < 0 ? 0 : tres;
-
-               /*
-                * Apparently ktime isn't accurate enough and occasionally has a
-                * bit of mismatch in the jiffies<->nsecs<->ktime loop. So patch
-                * things up to make the test happy. We allow up to 1 jiffy.
-                *
-                * This is a regrssion from the timespec->ktime conversion.
-                */
-               if (ret == -ETIME && *timeout < jiffies_to_usecs(1)*1000)
-                       *timeout = 0;
-       }
-
-       return ret;
-}
-
-int i915_gem_request_add_to_client(struct drm_i915_gem_request *req,
-                                  struct drm_file *file)
-{
-       struct drm_i915_file_private *file_priv;
-
-       WARN_ON(!req || !file || req->file_priv);
-
-       if (!req || !file)
-               return -EINVAL;
-
-       if (req->file_priv)
-               return -EINVAL;
-
-       file_priv = file->driver_priv;
-
-       spin_lock(&file_priv->mm.lock);
-       req->file_priv = file_priv;
-       list_add_tail(&req->client_list, &file_priv->mm.request_list);
-       spin_unlock(&file_priv->mm.lock);
-
-       req->pid = get_pid(task_pid(current));
-
-       return 0;
-}
-
-static inline void
-i915_gem_request_remove_from_client(struct drm_i915_gem_request *request)
-{
-       struct drm_i915_file_private *file_priv = request->file_priv;
-
-       if (!file_priv)
-               return;
-
-       spin_lock(&file_priv->mm.lock);
-       list_del(&request->client_list);
-       request->file_priv = NULL;
-       spin_unlock(&file_priv->mm.lock);
-
-       put_pid(request->pid);
-       request->pid = NULL;
-}
-
-static void i915_gem_request_retire(struct drm_i915_gem_request *request)
-{
-       trace_i915_gem_request_retire(request);
-
-       /* We know the GPU must have read the request to have
-        * sent us the seqno + interrupt, so use the position
-        * of tail of the request to update the last known position
-        * of the GPU head.
-        *
-        * Note this requires that we are always called in request
-        * completion order.
-        */
-       request->ringbuf->last_retired_head = request->postfix;
-
-       list_del_init(&request->list);
-       i915_gem_request_remove_from_client(request);
-
-       i915_gem_request_unreference(request);
-}
-
-static void
-__i915_gem_request_retire__upto(struct drm_i915_gem_request *req)
-{
-       struct intel_engine_cs *engine = req->engine;
-       struct drm_i915_gem_request *tmp;
-
-       lockdep_assert_held(&engine->dev->struct_mutex);
-
-       if (list_empty(&req->list))
-               return;
-
-       do {
-               tmp = list_first_entry(&engine->request_list,
-                                      typeof(*tmp), list);
-
-               i915_gem_request_retire(tmp);
-       } while (tmp != req);
-
-       WARN_ON(i915_verify_lists(engine->dev));
-}
-
-/**
- * Waits for a request to be signaled, and cleans up the
- * request and object lists appropriately for that event.
- */
-int
-i915_wait_request(struct drm_i915_gem_request *req)
-{
-       struct drm_i915_private *dev_priv = req->i915;
-       bool interruptible;
-       int ret;
-
-       interruptible = dev_priv->mm.interruptible;
-
-       BUG_ON(!mutex_is_locked(&dev_priv->dev->struct_mutex));
-
-       ret = __i915_wait_request(req, interruptible, NULL, NULL);
-       if (ret)
-               return ret;
-
-       /* If the GPU hung, we want to keep the requests to find the guilty. */
-       if (req->reset_counter == i915_reset_counter(&dev_priv->gpu_error))
-               __i915_gem_request_retire__upto(req);
-
-       return 0;
-}
-
 /**
  * Ensures that all rendering to the object has completed and the object is
  * safe to unbind from the GTT or access from the CPU.
+ * @obj: i915 gem object
+ * @readonly: only wait for outstanding GPU writes (the caller will only read)
  */
 int
 i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
                               bool readonly)
 {
+       struct reservation_object *resv;
        int ret, i;
 
-       if (!obj->active)
-               return 0;
-
        if (readonly) {
                if (obj->last_write_req != NULL) {
                        ret = i915_wait_request(obj->last_write_req);
@@ -1502,6 +1368,16 @@ i915_gem_object_wait_rendering(struct drm_i915_gem_object *obj,
                GEM_BUG_ON(obj->active);
        }
 
+       resv = i915_gem_object_get_dmabuf_resv(obj);
+       if (resv) {
+               long err;
+
+               err = reservation_object_wait_timeout_rcu(resv, !readonly, true,
+                                                         MAX_SCHEDULE_TIMEOUT);
+               if (err < 0)
+                       return err;
+       }
+
        return 0;
 }
 
@@ -1509,15 +1385,15 @@ static void
 i915_gem_object_retire_request(struct drm_i915_gem_object *obj,
                               struct drm_i915_gem_request *req)
 {
-       int ring = req->engine->id;
+       int idx = req->engine->id;
 
-       if (obj->last_read_req[ring] == req)
-               i915_gem_object_retire__read(obj, ring);
+       if (obj->last_read_req[idx] == req)
+               i915_gem_object_retire__read(obj, idx);
        else if (obj->last_write_req == req)
                i915_gem_object_retire__write(obj);
 
-       if (req->reset_counter == i915_reset_counter(&req->i915->gpu_error))
-               __i915_gem_request_retire__upto(req);
+       if (!i915_reset_in_progress(&req->i915->gpu_error))
+               i915_gem_request_retire_upto(req);
 }
 
 /* A nonblocking variant of the above wait. This is a highly dangerous routine
@@ -1529,7 +1405,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
                                            bool readonly)
 {
        struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_request *requests[I915_NUM_ENGINES];
        int ret, i, n = 0;
 
@@ -1546,7 +1422,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
                if (req == NULL)
                        return 0;
 
-               requests[n++] = i915_gem_request_reference(req);
+               requests[n++] = i915_gem_request_get(req);
        } else {
                for (i = 0; i < I915_NUM_ENGINES; i++) {
                        struct drm_i915_gem_request *req;
@@ -1555,7 +1431,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
                        if (req == NULL)
                                continue;
 
-                       requests[n++] = i915_gem_request_reference(req);
+                       requests[n++] = i915_gem_request_get(req);
                }
        }
 
@@ -1568,7 +1444,7 @@ i915_gem_object_wait_rendering__nonblocking(struct drm_i915_gem_object *obj,
        for (i = 0; i < n; i++) {
                if (ret == 0)
                        i915_gem_object_retire_request(obj, requests[i]);
-               i915_gem_request_unreference(requests[i]);
+               i915_gem_request_put(requests[i]);
        }
 
        return ret;
@@ -1580,9 +1456,19 @@ static struct intel_rps_client *to_rps_client(struct drm_file *file)
        return &fpriv->rps;
 }
 
+static enum fb_op_origin
+write_origin(struct drm_i915_gem_object *obj, unsigned domain)
+{
+       return domain == I915_GEM_DOMAIN_GTT && !obj->has_wc_mmap ?
+              ORIGIN_GTT : ORIGIN_CPU;
+}
+
 /**
  * Called when user space prepares to use an object with the CPU, either
  * through the mmap ioctl's mapping or a GTT mapping.
+ * @dev: drm device
+ * @data: ioctl data blob
+ * @file: drm file
  */
 int
 i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
@@ -1611,8 +1497,8 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
        if (ret)
                return ret;
 
-       obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
-       if (&obj->base == NULL) {
+       obj = i915_gem_object_lookup(file, args->handle);
+       if (!obj) {
                ret = -ENOENT;
                goto unlock;
        }
@@ -1633,12 +1519,10 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
                ret = i915_gem_object_set_to_cpu_domain(obj, write_domain != 0);
 
        if (write_domain != 0)
-               intel_fb_obj_invalidate(obj,
-                                       write_domain == I915_GEM_DOMAIN_GTT ?
-                                       ORIGIN_GTT : ORIGIN_CPU);
+               intel_fb_obj_invalidate(obj, write_origin(obj, write_domain));
 
 unref:
-       drm_gem_object_unreference(&obj->base);
+       i915_gem_object_put(obj);
 unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
@@ -1646,6 +1530,9 @@ unlock:
 
 /**
  * Called when user space has done writes to this buffer
+ * @dev: drm device
+ * @data: ioctl data blob
+ * @file: drm file
  */
 int
 i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
@@ -1659,8 +1546,8 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
        if (ret)
                return ret;
 
-       obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
-       if (&obj->base == NULL) {
+       obj = i915_gem_object_lookup(file, args->handle);
+       if (!obj) {
                ret = -ENOENT;
                goto unlock;
        }
@@ -1669,15 +1556,18 @@ i915_gem_sw_finish_ioctl(struct drm_device *dev, void *data,
        if (obj->pin_display)
                i915_gem_object_flush_cpu_write_domain(obj);
 
-       drm_gem_object_unreference(&obj->base);
+       i915_gem_object_put(obj);
 unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
 }
 
 /**
- * Maps the contents of an object, returning the address it is mapped
- * into.
+ * i915_gem_mmap_ioctl - Maps the contents of an object, returning the address
+ *                      it is mapped to.
+ * @dev: drm device
+ * @data: ioctl data blob
+ * @file: drm file
  *
  * While the mapping holds a reference on the contents of the object, it doesn't
  * imply a ref on the object itself.
@@ -1697,7 +1587,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file)
 {
        struct drm_i915_gem_mmap *args = data;
-       struct drm_gem_object *obj;
+       struct drm_i915_gem_object *obj;
        unsigned long addr;
 
        if (args->flags & ~(I915_MMAP_WC))
@@ -1706,19 +1596,19 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
        if (args->flags & I915_MMAP_WC && !boot_cpu_has(X86_FEATURE_PAT))
                return -ENODEV;
 
-       obj = drm_gem_object_lookup(file, args->handle);
-       if (obj == NULL)
+       obj = i915_gem_object_lookup(file, args->handle);
+       if (!obj)
                return -ENOENT;
 
        /* prime objects have no backing filp to GEM mmap
         * pages from.
         */
-       if (!obj->filp) {
-               drm_gem_object_unreference_unlocked(obj);
+       if (!obj->base.filp) {
+               i915_gem_object_put_unlocked(obj);
                return -EINVAL;
        }
 
-       addr = vm_mmap(obj->filp, 0, args->size,
+       addr = vm_mmap(obj->base.filp, 0, args->size,
                       PROT_READ | PROT_WRITE, MAP_SHARED,
                       args->offset);
        if (args->flags & I915_MMAP_WC) {
@@ -1726,7 +1616,7 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                struct vm_area_struct *vma;
 
                if (down_write_killable(&mm->mmap_sem)) {
-                       drm_gem_object_unreference_unlocked(obj);
+                       i915_gem_object_put_unlocked(obj);
                        return -EINTR;
                }
                vma = find_vma(mm, addr);
@@ -1736,8 +1626,11 @@ i915_gem_mmap_ioctl(struct drm_device *dev, void *data,
                else
                        addr = -ENOMEM;
                up_write(&mm->mmap_sem);
+
+               /* This may race, but that's ok, it only gets set */
+               WRITE_ONCE(obj->has_wc_mmap, true);
        }
-       drm_gem_object_unreference_unlocked(obj);
+       i915_gem_object_put_unlocked(obj);
        if (IS_ERR((void *)addr))
                return addr;
 
@@ -1982,7 +1875,7 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
                return size;
 
        /* Previous chips need a power-of-two fence region when tiling */
-       if (INTEL_INFO(dev)->gen == 3)
+       if (IS_GEN3(dev))
                gtt_size = 1024*1024;
        else
                gtt_size = 512*1024;
@@ -1995,7 +1888,10 @@ i915_gem_get_gtt_size(struct drm_device *dev, uint32_t size, int tiling_mode)
 
 /**
  * i915_gem_get_gtt_alignment - return required GTT alignment for an object
- * @obj: object to check
+ * @dev: drm device
+ * @size: object size
+ * @tiling_mode: tiling mode
+ * @fenced: whether fenced alignment is required
  *
  * Return the required GTT alignment for an object, taking into account
  * potential fence register mapping.
@@ -2021,7 +1917,7 @@ i915_gem_get_gtt_alignment(struct drm_device *dev, uint32_t size,
 
 static int i915_gem_object_create_mmap_offset(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        int ret;
 
        dev_priv->mm.shrinker_no_lock_stealing = true;
@@ -2072,8 +1968,8 @@ i915_gem_mmap_gtt(struct drm_file *file,
        if (ret)
                return ret;
 
-       obj = to_intel_bo(drm_gem_object_lookup(file, handle));
-       if (&obj->base == NULL) {
+       obj = i915_gem_object_lookup(file, handle);
+       if (!obj) {
                ret = -ENOENT;
                goto unlock;
        }
@@ -2091,7 +1987,7 @@ i915_gem_mmap_gtt(struct drm_file *file,
        *offset = drm_vma_node_offset_addr(&obj->base.vma_node);
 
 out:
-       drm_gem_object_unreference(&obj->base);
+       i915_gem_object_put(obj);
 unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
@@ -2162,7 +2058,8 @@ i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
 static void
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
 {
-       struct sg_page_iter sg_iter;
+       struct sgt_iter sgt_iter;
+       struct page *page;
        int ret;
 
        BUG_ON(obj->madv == __I915_MADV_PURGED);
@@ -2184,9 +2081,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj)
        if (obj->madv == I915_MADV_DONTNEED)
                obj->dirty = 0;
 
-       for_each_sg_page(obj->pages->sgl, &sg_iter, obj->pages->nents, 0) {
-               struct page *page = sg_page_iter_page(&sg_iter);
-
+       for_each_sgt_page(page, sgt_iter, obj->pages) {
                if (obj->dirty)
                        set_page_dirty(page);
 
@@ -2238,12 +2133,12 @@ i915_gem_object_put_pages(struct drm_i915_gem_object *obj)
 static int
 i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        int page_count, i;
        struct address_space *mapping;
        struct sg_table *st;
        struct scatterlist *sg;
-       struct sg_page_iter sg_iter;
+       struct sgt_iter sgt_iter;
        struct page *page;
        unsigned long last_pfn = 0;     /* suppress gcc warning */
        int ret;
@@ -2340,8 +2235,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 
 err_pages:
        sg_mark_end(sg);
-       for_each_sg_page(st->sgl, &sg_iter, st->nents, 0)
-               put_page(sg_page_iter_page(&sg_iter));
+       for_each_sgt_page(page, sgt_iter, st)
+               put_page(page);
        sg_free_table(st);
        kfree(st);
 
@@ -2369,7 +2264,7 @@ err_pages:
 int
 i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 {
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        const struct drm_i915_gem_object_ops *ops = obj->ops;
        int ret;
 
@@ -2395,6 +2290,44 @@ i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
        return 0;
 }
 
+/* The 'mapping' part of i915_gem_object_pin_map() below */
+static void *i915_gem_object_map(const struct drm_i915_gem_object *obj)
+{
+       unsigned long n_pages = obj->base.size >> PAGE_SHIFT;
+       struct sg_table *sgt = obj->pages;
+       struct sgt_iter sgt_iter;
+       struct page *page;
+       struct page *stack_pages[32];
+       struct page **pages = stack_pages;
+       unsigned long i = 0;
+       void *addr;
+
+       /* A single page can always be kmapped */
+       if (n_pages == 1)
+               return kmap(sg_page(sgt->sgl));
+
+       if (n_pages > ARRAY_SIZE(stack_pages)) {
+               /* Too big for stack -- allocate temporary array instead */
+               pages = drm_malloc_gfp(n_pages, sizeof(*pages), GFP_TEMPORARY);
+               if (!pages)
+                       return NULL;
+       }
+
+       for_each_sgt_page(page, sgt_iter, sgt)
+               pages[i++] = page;
+
+       /* Check that we have the expected number of pages */
+       GEM_BUG_ON(i != n_pages);
+
+       addr = vmap(pages, n_pages, 0, PAGE_KERNEL);
+
+       if (pages != stack_pages)
+               drm_free_large(pages);
+
+       return addr;
+}
+
+/* get, pin, and map the pages of the object into kernel space */
 void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
 {
        int ret;
@@ -2407,29 +2340,9 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj)
 
        i915_gem_object_pin_pages(obj);
 
-       if (obj->mapping == NULL) {
-               struct page **pages;
-
-               pages = NULL;
-               if (obj->base.size == PAGE_SIZE)
-                       obj->mapping = kmap(sg_page(obj->pages->sgl));
-               else
-                       pages = drm_malloc_gfp(obj->base.size >> PAGE_SHIFT,
-                                              sizeof(*pages),
-                                              GFP_TEMPORARY);
-               if (pages != NULL) {
-                       struct sg_page_iter sg_iter;
-                       int n;
-
-                       n = 0;
-                       for_each_sg_page(obj->pages->sgl, &sg_iter,
-                                        obj->pages->nents, 0)
-                               pages[n++] = sg_page_iter_page(&sg_iter);
-
-                       obj->mapping = vmap(pages, n, 0, PAGE_KERNEL);
-                       drm_free_large(pages);
-               }
-               if (obj->mapping == NULL) {
+       if (!obj->mapping) {
+               obj->mapping = i915_gem_object_map(obj);
+               if (!obj->mapping) {
                        i915_gem_object_unpin_pages(obj);
                        return ERR_PTR(-ENOMEM);
                }
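/*
 * Illustrative sketch, not part of the patch above: the "small on-stack
 * array, otherwise heap-allocate" pattern that i915_gem_object_map() uses
 * for the temporary page array it hands to vmap().
 */
#include <stdio.h>
#include <stdlib.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static long sum_of_squares(unsigned long n)
{
	long stack_buf[32];
	long *buf = stack_buf;
	long total = 0;
	unsigned long i;

	if (n > ARRAY_SIZE(stack_buf)) {
		/* Too big for the stack -- fall back to a temporary allocation */
		buf = malloc(n * sizeof(*buf));
		if (!buf)
			return -1;
	}

	for (i = 0; i < n; i++)
		buf[i] = (long)(i * i);
	for (i = 0; i < n; i++)
		total += buf[i];

	if (buf != stack_buf)
		free(buf);

	return total;
}

int main(void)
{
	printf("%ld\n", sum_of_squares(100));	/* 328350 */
	return 0;
}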
@@ -2448,7 +2361,7 @@ void i915_vma_move_to_active(struct i915_vma *vma,
 
        /* Add a reference if we're newly entering the active list. */
        if (obj->active == 0)
-               drm_gem_object_reference(&obj->base);
+               i915_gem_object_get(obj);
        obj->active |= intel_engine_flag(engine);
 
        list_move_tail(&obj->engine_list[engine->id], &engine->active_list);
@@ -2468,20 +2381,20 @@ i915_gem_object_retire__write(struct drm_i915_gem_object *obj)
 }
 
 static void
-i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
+i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int idx)
 {
        struct i915_vma *vma;
 
-       GEM_BUG_ON(obj->last_read_req[ring] == NULL);
-       GEM_BUG_ON(!(obj->active & (1 << ring)));
+       GEM_BUG_ON(obj->last_read_req[idx] == NULL);
+       GEM_BUG_ON(!(obj->active & (1 << idx)));
 
-       list_del_init(&obj->engine_list[ring]);
-       i915_gem_request_assign(&obj->last_read_req[ring], NULL);
+       list_del_init(&obj->engine_list[idx]);
+       i915_gem_request_assign(&obj->last_read_req[idx], NULL);
 
-       if (obj->last_write_req && obj->last_write_req->engine->id == ring)
+       if (obj->last_write_req && obj->last_write_req->engine->id == idx)
                i915_gem_object_retire__write(obj);
 
-       obj->active &= ~(1 << ring);
+       obj->active &= ~(1 << idx);
        if (obj->active)
                return;
 
@@ -2498,335 +2411,38 @@ i915_gem_object_retire__read(struct drm_i915_gem_object *obj, int ring)
        }
 
        i915_gem_request_assign(&obj->last_fenced_req, NULL);
-       drm_gem_object_unreference(&obj->base);
-}
-
-static int
-i915_gem_init_seqno(struct drm_device *dev, u32 seqno)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       struct intel_engine_cs *engine;
-       int ret;
-
-       /* Carefully retire all requests without writing to the rings */
-       for_each_engine(engine, dev_priv) {
-               ret = intel_engine_idle(engine);
-               if (ret)
-                       return ret;
-       }
-       i915_gem_retire_requests(dev);
-
-       /* Finally reset hw state */
-       for_each_engine(engine, dev_priv)
-               intel_ring_init_seqno(engine, seqno);
-
-       return 0;
-}
-
-int i915_gem_set_seqno(struct drm_device *dev, u32 seqno)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret;
-
-       if (seqno == 0)
-               return -EINVAL;
-
-       /* HWS page needs to be set less than what we
-        * will inject to ring
-        */
-       ret = i915_gem_init_seqno(dev, seqno - 1);
-       if (ret)
-               return ret;
-
-       /* Carefully set the last_seqno value so that wrap
-        * detection still works
-        */
-       dev_priv->next_seqno = seqno;
-       dev_priv->last_seqno = seqno - 1;
-       if (dev_priv->last_seqno == 0)
-               dev_priv->last_seqno--;
-
-       return 0;
-}
-
-int
-i915_gem_get_seqno(struct drm_device *dev, u32 *seqno)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-
-       /* reserve 0 for non-seqno */
-       if (dev_priv->next_seqno == 0) {
-               int ret = i915_gem_init_seqno(dev, 0);
-               if (ret)
-                       return ret;
-
-               dev_priv->next_seqno = 1;
-       }
-
-       *seqno = dev_priv->last_seqno = dev_priv->next_seqno++;
-       return 0;
-}
-
-/*
- * NB: This function is not allowed to fail. Doing so would mean the the
- * request is not being tracked for completion but the work itself is
- * going to happen on the hardware. This would be a Bad Thing(tm).
- */
-void __i915_add_request(struct drm_i915_gem_request *request,
-                       struct drm_i915_gem_object *obj,
-                       bool flush_caches)
-{
-       struct intel_engine_cs *engine;
-       struct drm_i915_private *dev_priv;
-       struct intel_ringbuffer *ringbuf;
-       u32 request_start;
-       int ret;
-
-       if (WARN_ON(request == NULL))
-               return;
-
-       engine = request->engine;
-       dev_priv = request->i915;
-       ringbuf = request->ringbuf;
-
-       /*
-        * To ensure that this call will not fail, space for its emissions
-        * should already have been reserved in the ring buffer. Let the ring
-        * know that it is time to use that space up.
-        */
-       intel_ring_reserved_space_use(ringbuf);
-
-       request_start = intel_ring_get_tail(ringbuf);
-       /*
-        * Emit any outstanding flushes - execbuf can fail to emit the flush
-        * after having emitted the batchbuffer command. Hence we need to fix
-        * things up similar to emitting the lazy request. The difference here
-        * is that the flush _must_ happen before the next request, no matter
-        * what.
-        */
-       if (flush_caches) {
-               if (i915.enable_execlists)
-                       ret = logical_ring_flush_all_caches(request);
-               else
-                       ret = intel_ring_flush_all_caches(request);
-               /* Not allowed to fail! */
-               WARN(ret, "*_ring_flush_all_caches failed: %d!\n", ret);
-       }
-
-       trace_i915_gem_request_add(request);
-
-       request->head = request_start;
-
-       /* Whilst this request exists, batch_obj will be on the
-        * active_list, and so will hold the active reference. Only when this
-        * request is retired will the the batch_obj be moved onto the
-        * inactive_list and lose its active reference. Hence we do not need
-        * to explicitly hold another reference here.
-        */
-       request->batch_obj = obj;
-
-       /* Seal the request and mark it as pending execution. Note that
-        * we may inspect this state, without holding any locks, during
-        * hangcheck. Hence we apply the barrier to ensure that we do not
-        * see a more recent value in the hws than we are tracking.
-        */
-       request->emitted_jiffies = jiffies;
-       request->previous_seqno = engine->last_submitted_seqno;
-       smp_store_mb(engine->last_submitted_seqno, request->seqno);
-       list_add_tail(&request->list, &engine->request_list);
-
-       /* Record the position of the start of the request so that
-        * should we detect the updated seqno part-way through the
-        * GPU processing the request, we never over-estimate the
-        * position of the head.
-        */
-       request->postfix = intel_ring_get_tail(ringbuf);
-
-       if (i915.enable_execlists)
-               ret = engine->emit_request(request);
-       else {
-               ret = engine->add_request(request);
-
-               request->tail = intel_ring_get_tail(ringbuf);
-       }
-       /* Not allowed to fail! */
-       WARN(ret, "emit|add_request failed: %d!\n", ret);
-
-       i915_queue_hangcheck(engine->dev);
-
-       queue_delayed_work(dev_priv->wq,
-                          &dev_priv->mm.retire_work,
-                          round_jiffies_up_relative(HZ));
-       intel_mark_busy(dev_priv->dev);
-
-       /* Sanity check that the reserved size was large enough. */
-       intel_ring_reserved_space_end(ringbuf);
+       i915_gem_object_put(obj);
 }
 
-static bool i915_context_is_banned(struct drm_i915_private *dev_priv,
-                                  const struct intel_context *ctx)
+static bool i915_context_is_banned(const struct i915_gem_context *ctx)
 {
        unsigned long elapsed;
 
-       elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
-
-       if (ctx->hang_stats.banned)
-               return true;
-
-       if (ctx->hang_stats.ban_period_seconds &&
-           elapsed <= ctx->hang_stats.ban_period_seconds) {
-               if (!i915_gem_context_is_default(ctx)) {
-                       DRM_DEBUG("context hanging too fast, banning!\n");
-                       return true;
-               } else if (i915_stop_ring_allow_ban(dev_priv)) {
-                       if (i915_stop_ring_allow_warn(dev_priv))
-                               DRM_ERROR("gpu hanging too fast, banning!\n");
-                       return true;
-               }
-       }
-
-       return false;
-}
-
-static void i915_set_reset_status(struct drm_i915_private *dev_priv,
-                                 struct intel_context *ctx,
-                                 const bool guilty)
-{
-       struct i915_ctx_hang_stats *hs;
-
-       if (WARN_ON(!ctx))
-               return;
-
-       hs = &ctx->hang_stats;
-
-       if (guilty) {
-               hs->banned = i915_context_is_banned(dev_priv, ctx);
-               hs->batch_active++;
-               hs->guilty_ts = get_seconds();
-       } else {
-               hs->batch_pending++;
-       }
-}
-
-void i915_gem_request_free(struct kref *req_ref)
-{
-       struct drm_i915_gem_request *req = container_of(req_ref,
-                                                typeof(*req), ref);
-       struct intel_context *ctx = req->ctx;
-
-       if (req->file_priv)
-               i915_gem_request_remove_from_client(req);
-
-       if (ctx) {
-               if (i915.enable_execlists && ctx != req->i915->kernel_context)
-                       intel_lr_context_unpin(ctx, req->engine);
-
-               i915_gem_context_unreference(ctx);
-       }
-
-       kmem_cache_free(req->i915->requests, req);
-}
-
-static inline int
-__i915_gem_request_alloc(struct intel_engine_cs *engine,
-                        struct intel_context *ctx,
-                        struct drm_i915_gem_request **req_out)
-{
-       struct drm_i915_private *dev_priv = to_i915(engine->dev);
-       unsigned reset_counter = i915_reset_counter(&dev_priv->gpu_error);
-       struct drm_i915_gem_request *req;
-       int ret;
-
-       if (!req_out)
-               return -EINVAL;
-
-       *req_out = NULL;
-
-       /* ABI: Before userspace accesses the GPU (e.g. execbuffer), report
-        * EIO if the GPU is already wedged, or EAGAIN to drop the struct_mutex
-        * and restart.
-        */
-       ret = i915_gem_check_wedge(reset_counter, dev_priv->mm.interruptible);
-       if (ret)
-               return ret;
-
-       req = kmem_cache_zalloc(dev_priv->requests, GFP_KERNEL);
-       if (req == NULL)
-               return -ENOMEM;
-
-       ret = i915_gem_get_seqno(engine->dev, &req->seqno);
-       if (ret)
-               goto err;
-
-       kref_init(&req->ref);
-       req->i915 = dev_priv;
-       req->engine = engine;
-       req->reset_counter = reset_counter;
-       req->ctx  = ctx;
-       i915_gem_context_reference(req->ctx);
-
-       if (i915.enable_execlists)
-               ret = intel_logical_ring_alloc_request_extras(req);
-       else
-               ret = intel_ring_alloc_request_extras(req);
-       if (ret) {
-               i915_gem_context_unreference(req->ctx);
-               goto err;
-       }
-
-       /*
-        * Reserve space in the ring buffer for all the commands required to
-        * eventually emit this request. This is to guarantee that the
-        * i915_add_request() call can't fail. Note that the reserve may need
-        * to be redone if the request is not actually submitted straight
-        * away, e.g. because a GPU scheduler has deferred it.
-        */
-       if (i915.enable_execlists)
-               ret = intel_logical_ring_reserve_space(req);
-       else
-               ret = intel_ring_reserve_space(req);
-       if (ret) {
-               /*
-                * At this point, the request is fully allocated even if not
-                * fully prepared. Thus it can be cleaned up using the proper
-                * free code.
-                */
-               intel_ring_reserved_space_cancel(req->ringbuf);
-               i915_gem_request_unreference(req);
-               return ret;
-       }
+       if (ctx->hang_stats.banned)
+               return true;
 
-       *req_out = req;
-       return 0;
+       elapsed = get_seconds() - ctx->hang_stats.guilty_ts;
+       if (ctx->hang_stats.ban_period_seconds &&
+           elapsed <= ctx->hang_stats.ban_period_seconds) {
+               DRM_DEBUG("context hanging too fast, banning!\n");
+               return true;
+       }
 
-err:
-       kmem_cache_free(dev_priv->requests, req);
-       return ret;
+       return false;
 }
 
-/**
- * i915_gem_request_alloc - allocate a request structure
- *
- * @engine: engine that we wish to issue the request on.
- * @ctx: context that the request will be associated with.
- *       This can be NULL if the request is not directly related to
- *       any specific user context, in which case this function will
- *       choose an appropriate context to use.
- *
- * Returns a pointer to the allocated request if successful,
- * or an error code if not.
- */
-struct drm_i915_gem_request *
-i915_gem_request_alloc(struct intel_engine_cs *engine,
-                      struct intel_context *ctx)
+static void i915_set_reset_status(struct i915_gem_context *ctx,
+                                 const bool guilty)
 {
-       struct drm_i915_gem_request *req;
-       int err;
+       struct i915_ctx_hang_stats *hs = &ctx->hang_stats;
 
-       if (ctx == NULL)
-               ctx = to_i915(engine->dev)->kernel_context;
-       err = __i915_gem_request_alloc(engine, ctx, &req);
-       return err ? ERR_PTR(err) : req;
+       if (guilty) {
+               hs->banned = i915_context_is_banned(ctx);
+               hs->batch_active++;
+               hs->guilty_ts = get_seconds();
+       } else {
+               hs->batch_pending++;
+       }
 }
 
 struct drm_i915_gem_request *
@@ -2834,8 +2450,16 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *request;
 
+       /* We are called by the error capture and reset at a random
+        * point in time. In particular, note that neither is crucially
+        * ordered with an interrupt. After a hang, the GPU is dead and we
+        * assume that no more writes can happen (we waited long enough for
+        * all writes that were in transaction to be flushed) - adding an
+        * extra delay for a recent interrupt is pointless. Hence, we do
+        * not need an engine->irq_seqno_barrier() before the seqno reads.
+        */
        list_for_each_entry(request, &engine->request_list, list) {
-               if (i915_gem_request_completed(request, false))
+               if (i915_gem_request_completed(request))
                        continue;
 
                return request;
@@ -2844,29 +2468,25 @@ i915_gem_find_active_request(struct intel_engine_cs *engine)
        return NULL;
 }
 
-static void i915_gem_reset_engine_status(struct drm_i915_private *dev_priv,
-                                      struct intel_engine_cs *engine)
+static void i915_gem_reset_engine_status(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *request;
        bool ring_hung;
 
        request = i915_gem_find_active_request(engine);
-
        if (request == NULL)
                return;
 
        ring_hung = engine->hangcheck.score >= HANGCHECK_SCORE_RING_HUNG;
 
-       i915_set_reset_status(dev_priv, request->ctx, ring_hung);
-
+       i915_set_reset_status(request->ctx, ring_hung);
        list_for_each_entry_continue(request, &engine->request_list, list)
-               i915_set_reset_status(dev_priv, request->ctx, false);
+               i915_set_reset_status(request->ctx, false);
 }
 
-static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
-                                       struct intel_engine_cs *engine)
+static void i915_gem_reset_engine_cleanup(struct intel_engine_cs *engine)
 {
-       struct intel_ringbuffer *buffer;
+       struct intel_ring *ring;
 
        while (!list_empty(&engine->active_list)) {
                struct drm_i915_gem_object *obj;
@@ -2878,6 +2498,12 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
                i915_gem_object_retire__read(obj, engine->id);
        }
 
+       /* Mark all pending requests as complete so that any concurrent
+        * (lockless) lookup doesn't try and wait upon the request as we
+        * reset it.
+        */
+       intel_engine_init_seqno(engine, engine->last_submitted_seqno);
+
        /*
         * Clear the execlists queue up before freeing the requests, as those
         * are the ones that keep the context and ringbuffer backing objects
@@ -2888,13 +2514,7 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
                /* Ensure irq handler finishes or is cancelled. */
                tasklet_kill(&engine->irq_tasklet);
 
-               spin_lock_bh(&engine->execlist_lock);
-               /* list_splice_tail_init checks for empty lists */
-               list_splice_tail_init(&engine->execlist_queue,
-                                     &engine->execlist_retired_req_list);
-               spin_unlock_bh(&engine->execlist_lock);
-
-               intel_execlists_retire_requests(engine);
+               intel_execlists_cancel_requests(engine);
        }
 
        /*
@@ -2904,14 +2524,14 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
         * implicit references on things like e.g. ppgtt address spaces through
         * the request.
         */
-       while (!list_empty(&engine->request_list)) {
+       if (!list_empty(&engine->request_list)) {
                struct drm_i915_gem_request *request;
 
-               request = list_first_entry(&engine->request_list,
-                                          struct drm_i915_gem_request,
-                                          list);
+               request = list_last_entry(&engine->request_list,
+                                         struct drm_i915_gem_request,
+                                         list);
 
-               i915_gem_request_retire(request);
+               i915_gem_request_retire_upto(request);
        }
 
        /* Having flushed all requests from all queues, we know that all
@@ -2921,17 +2541,17 @@ static void i915_gem_reset_engine_cleanup(struct drm_i915_private *dev_priv,
         * upon reset is less than when we start. Do one more pass over
         * all the ringbuffers to reset last_retired_head.
         */
-       list_for_each_entry(buffer, &engine->buffers, link) {
-               buffer->last_retired_head = buffer->tail;
-               intel_ring_update_space(buffer);
+       list_for_each_entry(ring, &engine->buffers, link) {
+               ring->last_retired_head = ring->tail;
+               intel_ring_update_space(ring);
        }
 
-       intel_ring_init_seqno(engine, engine->last_submitted_seqno);
+       engine->i915->gt.active_engines &= ~intel_engine_flag(engine);
 }
 
 void i915_gem_reset(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
 
        /*
@@ -2940,10 +2560,11 @@ void i915_gem_reset(struct drm_device *dev)
         * their reference to the objects, the inspection must be done first.
         */
        for_each_engine(engine, dev_priv)
-               i915_gem_reset_engine_status(dev_priv, engine);
+               i915_gem_reset_engine_status(engine);
 
        for_each_engine(engine, dev_priv)
-               i915_gem_reset_engine_cleanup(dev_priv, engine);
+               i915_gem_reset_engine_cleanup(engine);
+       mod_delayed_work(dev_priv->wq, &dev_priv->gt.idle_work, 0);
 
        i915_gem_context_reset(dev);
 
@@ -2954,6 +2575,7 @@ void i915_gem_reset(struct drm_device *dev)
 
 /**
  * This function clears the request list as sequence numbers are passed.
+ * @engine: engine to retire requests on
  */
 void
 i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
@@ -2972,10 +2594,10 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
                                           struct drm_i915_gem_request,
                                           list);
 
-               if (!i915_gem_request_completed(request, true))
+               if (!i915_gem_request_completed(request))
                        break;
 
-               i915_gem_request_retire(request);
+               i915_gem_request_retire_upto(request);
        }
 
        /* Move any buffers on the active list that are no longer referenced
@@ -2995,84 +2617,113 @@ i915_gem_retire_requests_ring(struct intel_engine_cs *engine)
                i915_gem_object_retire__read(obj, engine->id);
        }
 
-       if (unlikely(engine->trace_irq_req &&
-                    i915_gem_request_completed(engine->trace_irq_req, true))) {
-               engine->irq_put(engine);
-               i915_gem_request_assign(&engine->trace_irq_req, NULL);
-       }
-
        WARN_ON(i915_verify_lists(engine->dev));
 }
 
-bool
-i915_gem_retire_requests(struct drm_device *dev)
+void i915_gem_retire_requests(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
-       bool idle = true;
+
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
+
+       if (dev_priv->gt.active_engines == 0)
+               return;
+
+       GEM_BUG_ON(!dev_priv->gt.awake);
 
        for_each_engine(engine, dev_priv) {
                i915_gem_retire_requests_ring(engine);
-               idle &= list_empty(&engine->request_list);
-               if (i915.enable_execlists) {
-                       spin_lock_bh(&engine->execlist_lock);
-                       idle &= list_empty(&engine->execlist_queue);
-                       spin_unlock_bh(&engine->execlist_lock);
-
-                       intel_execlists_retire_requests(engine);
-               }
+               if (list_empty(&engine->request_list))
+                       dev_priv->gt.active_engines &= ~intel_engine_flag(engine);
        }
 
-       if (idle)
-               mod_delayed_work(dev_priv->wq,
-                                  &dev_priv->mm.idle_work,
+       if (dev_priv->gt.active_engines == 0)
+               queue_delayed_work(dev_priv->wq,
+                                  &dev_priv->gt.idle_work,
                                   msecs_to_jiffies(100));
-
-       return idle;
 }
 
 static void
 i915_gem_retire_work_handler(struct work_struct *work)
 {
        struct drm_i915_private *dev_priv =
-               container_of(work, typeof(*dev_priv), mm.retire_work.work);
-       struct drm_device *dev = dev_priv->dev;
-       bool idle;
+               container_of(work, typeof(*dev_priv), gt.retire_work.work);
+       struct drm_device *dev = &dev_priv->drm;
 
        /* Come back later if the device is busy... */
-       idle = false;
        if (mutex_trylock(&dev->struct_mutex)) {
-               idle = i915_gem_retire_requests(dev);
+               i915_gem_retire_requests(dev_priv);
                mutex_unlock(&dev->struct_mutex);
        }
-       if (!idle)
-               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work,
+
+       /* Keep the retire handler running until we are finally idle.
+        * We do not need to do this test under locking as in the worst-case
+        * we queue the retire worker once too often.
+        */
+       if (READ_ONCE(dev_priv->gt.awake)) {
+               i915_queue_hangcheck(dev_priv);
+               queue_delayed_work(dev_priv->wq,
+                                  &dev_priv->gt.retire_work,
                                   round_jiffies_up_relative(HZ));
+       }
 }
 
 static void
 i915_gem_idle_work_handler(struct work_struct *work)
 {
        struct drm_i915_private *dev_priv =
-               container_of(work, typeof(*dev_priv), mm.idle_work.work);
-       struct drm_device *dev = dev_priv->dev;
+               container_of(work, typeof(*dev_priv), gt.idle_work.work);
+       struct drm_device *dev = &dev_priv->drm;
        struct intel_engine_cs *engine;
+       unsigned int stuck_engines;
+       bool rearm_hangcheck;
+
+       if (!READ_ONCE(dev_priv->gt.awake))
+               return;
+
+       if (READ_ONCE(dev_priv->gt.active_engines))
+               return;
+
+       rearm_hangcheck =
+               cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
+
+       if (!mutex_trylock(&dev->struct_mutex)) {
+               /* Currently busy, come back later */
+               mod_delayed_work(dev_priv->wq,
+                                &dev_priv->gt.idle_work,
+                                msecs_to_jiffies(50));
+               goto out_rearm;
+       }
+
+       if (dev_priv->gt.active_engines)
+               goto out_unlock;
 
        for_each_engine(engine, dev_priv)
-               if (!list_empty(&engine->request_list))
-                       return;
+               i915_gem_batch_pool_fini(&engine->batch_pool);
 
-       /* we probably should sync with hangcheck here, using cancel_work_sync.
-        * Also locking seems to be fubar here, engine->request_list is protected
-        * by dev->struct_mutex. */
+       GEM_BUG_ON(!dev_priv->gt.awake);
+       dev_priv->gt.awake = false;
+       rearm_hangcheck = false;
 
-       intel_mark_idle(dev);
+       /* As we have disabled hangcheck, we need to unstick any waiters still
+        * hanging around. However, as we may be racing against the interrupt
+        * handler or the waiters themselves, we skip enabling the fake-irq.
+        */
+       stuck_engines = intel_kick_waiters(dev_priv);
+       if (unlikely(stuck_engines))
+               DRM_DEBUG_DRIVER("kicked stuck waiters (%x)...missed irq?\n",
+                                stuck_engines);
 
-       if (mutex_trylock(&dev->struct_mutex)) {
-               for_each_engine(engine, dev_priv)
-                       i915_gem_batch_pool_fini(&engine->batch_pool);
+       if (INTEL_GEN(dev_priv) >= 6)
+               gen6_rps_idle(dev_priv);
+       intel_runtime_pm_put(dev_priv);
+out_unlock:
+       mutex_unlock(&dev->struct_mutex);
 
-               mutex_unlock(&dev->struct_mutex);
+out_rearm:
+       if (rearm_hangcheck) {
+               GEM_BUG_ON(!dev_priv->gt.awake);
+               i915_queue_hangcheck(dev_priv);
        }
 }
 
@@ -3080,6 +2731,7 @@ i915_gem_idle_work_handler(struct work_struct *work)
  * Ensures that an object will eventually get non-busy by flushing any required
  * write domains, emitting any outstanding lazy request and retiring any
  * completed requests.
+ * @obj: object to flush
  */
 static int
 i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
@@ -3096,14 +2748,8 @@ i915_gem_object_flush_active(struct drm_i915_gem_object *obj)
                if (req == NULL)
                        continue;
 
-               if (list_empty(&req->list))
-                       goto retire;
-
-               if (i915_gem_request_completed(req, true)) {
-                       __i915_gem_request_retire__upto(req);
-retire:
+               if (i915_gem_request_completed(req))
                        i915_gem_object_retire__read(obj, i);
-               }
        }
 
        return 0;
@@ -3111,7 +2757,9 @@ retire:
 
 /**
  * i915_gem_wait_ioctl - implements DRM_IOCTL_I915_GEM_WAIT
- * @DRM_IOCTL_ARGS: standard ioctl arguments
+ * @dev: drm device pointer
+ * @data: ioctl data blob
+ * @file: drm file pointer
  *
  * Returns 0 if successful, else an error is returned with the remaining time in
  * the timeout parameter.
@@ -3147,8 +2795,8 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        if (ret)
                return ret;
 
-       obj = to_intel_bo(drm_gem_object_lookup(file, args->bo_handle));
-       if (&obj->base == NULL) {
+       obj = i915_gem_object_lookup(file, args->bo_handle);
+       if (!obj) {
                mutex_unlock(&dev->struct_mutex);
                return -ENOENT;
        }
@@ -3169,13 +2817,13 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                goto out;
        }
 
-       drm_gem_object_unreference(&obj->base);
+       i915_gem_object_put(obj);
 
        for (i = 0; i < I915_NUM_ENGINES; i++) {
                if (obj->last_read_req[i] == NULL)
                        continue;
 
-               req[n++] = i915_gem_request_reference(obj->last_read_req[i]);
+               req[n++] = i915_gem_request_get(obj->last_read_req[i]);
        }
 
        mutex_unlock(&dev->struct_mutex);
@@ -3185,63 +2833,47 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
                        ret = __i915_wait_request(req[i], true,
                                                  args->timeout_ns > 0 ? &args->timeout_ns : NULL,
                                                  to_rps_client(file));
-               i915_gem_request_unreference__unlocked(req[i]);
+               i915_gem_request_put(req[i]);
        }
        return ret;
 
 out:
-       drm_gem_object_unreference(&obj->base);
+       i915_gem_object_put(obj);
        mutex_unlock(&dev->struct_mutex);
        return ret;
 }
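
A minimal userspace sketch of driving the wait ioctl documented above, assuming a DRM fd and a GEM handle obtained elsewhere; struct drm_i915_gem_wait and DRM_IOCTL_I915_GEM_WAIT are the existing i915 uapi definitions, a negative timeout_ns waits indefinitely, and an expired timeout surfaces as errno == ETIME:

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <i915_drm.h>	/* from libdrm; include path may vary */

	/* Block until 'handle' is idle or the timeout expires. */
	static int wait_on_bo(int fd, uint32_t handle, int64_t timeout_ns)
	{
		struct drm_i915_gem_wait wait = {
			.bo_handle = handle,
			.timeout_ns = timeout_ns,	/* <0 waits forever */
		};

		/* 0 on success; -1 with errno set (ETIME on timeout). */
		return ioctl(fd, DRM_IOCTL_I915_GEM_WAIT, &wait);
	}

In practice libdrm's drmIoctl() would be used instead of raw ioctl() so the call is restarted on EINTR.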
 
 static int
 __i915_gem_object_sync(struct drm_i915_gem_object *obj,
-                      struct intel_engine_cs *to,
-                      struct drm_i915_gem_request *from_req,
-                      struct drm_i915_gem_request **to_req)
+                      struct drm_i915_gem_request *to,
+                      struct drm_i915_gem_request *from)
 {
-       struct intel_engine_cs *from;
        int ret;
 
-       from = i915_gem_request_get_engine(from_req);
-       if (to == from)
+       if (to->engine == from->engine)
                return 0;
 
-       if (i915_gem_request_completed(from_req, true))
+       if (i915_gem_request_completed(from))
                return 0;
 
-       if (!i915_semaphore_is_enabled(obj->base.dev)) {
-               struct drm_i915_private *i915 = to_i915(obj->base.dev);
-               ret = __i915_wait_request(from_req,
-                                         i915->mm.interruptible,
+       if (!i915.semaphores) {
+               ret = __i915_wait_request(from,
+                                         from->i915->mm.interruptible,
                                          NULL,
-                                         &i915->rps.semaphores);
+                                         NO_WAITBOOST);
                if (ret)
                        return ret;
 
-               i915_gem_object_retire_request(obj, from_req);
+               i915_gem_object_retire_request(obj, from);
        } else {
-               int idx = intel_ring_sync_index(from, to);
-               u32 seqno = i915_gem_request_get_seqno(from_req);
+               int idx = intel_engine_sync_index(from->engine, to->engine);
+               u32 seqno = i915_gem_request_get_seqno(from);
 
-               WARN_ON(!to_req);
-
-               if (seqno <= from->semaphore.sync_seqno[idx])
+               if (seqno <= from->engine->semaphore.sync_seqno[idx])
                        return 0;
 
-               if (*to_req == NULL) {
-                       struct drm_i915_gem_request *req;
-
-                       req = i915_gem_request_alloc(to, NULL);
-                       if (IS_ERR(req))
-                               return PTR_ERR(req);
-
-                       *to_req = req;
-               }
-
-               trace_i915_gem_ring_sync_to(*to_req, from, from_req);
-               ret = to->semaphore.sync_to(*to_req, from, seqno);
+               trace_i915_gem_ring_sync_to(to, from);
+               ret = to->engine->semaphore.sync_to(to, from->engine, seqno);
                if (ret)
                        return ret;
 
@@ -3249,8 +2881,8 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
                 * might have just caused seqno wrap under
                 * the radar.
                 */
-               from->semaphore.sync_seqno[idx] =
-                       i915_gem_request_get_seqno(obj->last_read_req[from->id]);
+               from->engine->semaphore.sync_seqno[idx] =
+                       i915_gem_request_get_seqno(obj->last_read_req[from->engine->id]);
        }
 
        return 0;
@@ -3260,17 +2892,12 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
  * i915_gem_object_sync - sync an object to a ring.
  *
  * @obj: object which may be in use on another ring.
- * @to: ring we wish to use the object on. May be NULL.
- * @to_req: request we wish to use the object for. See below.
- *          This will be allocated and returned if a request is
- *          required but not passed in.
+ * @to: request we are wishing to use
  *
  * This code is meant to abstract object synchronization with the GPU.
- * Calling with NULL implies synchronizing the object with the CPU
- * rather than a particular GPU ring. Conceptually we serialise writes
- * between engines inside the GPU. We only allow one engine to write
- * into a buffer at any time, but multiple readers. To ensure each has
- * a coherent view of memory, we must:
+ * Conceptually we serialise writes between engines inside the GPU.
+ * We only allow one engine to write into a buffer at any time, but
+ * multiple readers. To ensure each has a coherent view of memory, we must:
  *
  * - If there is an outstanding write request to the object, the new
  *   request must wait for it to complete (either CPU or in hw, requests
@@ -3279,22 +2906,11 @@ __i915_gem_object_sync(struct drm_i915_gem_object *obj,
  * - If we are a write request (pending_write_domain is set), the new
  *   request must wait for outstanding read requests to complete.
  *
- * For CPU synchronisation (NULL to) no request is required. For syncing with
- * rings to_req must be non-NULL. However, a request does not have to be
- * pre-allocated. If *to_req is NULL and sync commands will be emitted then a
- * request will be allocated automatically and returned through *to_req. Note
- * that it is not guaranteed that commands will be emitted (because the system
- * might already be idle). Hence there is no need to create a request that
- * might never have any work submitted. Note further that if a request is
- * returned in *to_req, it is the responsibility of the caller to submit
- * that request (after potentially adding more work to it).
- *
  * Returns 0 if successful, else propagates up the lower layer error.
  */
 int
 i915_gem_object_sync(struct drm_i915_gem_object *obj,
-                    struct intel_engine_cs *to,
-                    struct drm_i915_gem_request **to_req)
+                    struct drm_i915_gem_request *to)
 {
        const bool readonly = obj->base.pending_write_domain == 0;
        struct drm_i915_gem_request *req[I915_NUM_ENGINES];
@@ -3303,9 +2919,6 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
        if (!obj->active)
                return 0;
 
-       if (to == NULL)
-               return i915_gem_object_wait_rendering(obj, readonly);
-
        n = 0;
        if (readonly) {
                if (obj->last_write_req)
@@ -3316,7 +2929,7 @@ i915_gem_object_sync(struct drm_i915_gem_object *obj,
                                req[n++] = obj->last_read_req[i];
        }
        for (i = 0; i < n; i++) {
-               ret = __i915_gem_object_sync(obj, to, req[i], to_req);
+               ret = __i915_gem_object_sync(obj, to, req[i]);
                if (ret)
                        return ret;
        }
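
With the simplified signature above, a caller already holds the request it is building and syncs each object directly against it. A hypothetical helper (not taken from this patch; the array-of-objects shape is only illustrative) would look roughly like:

	static int sync_objects_to_request(struct drm_i915_gem_request *req,
					   struct drm_i915_gem_object **objs,
					   int count)
	{
		int i, ret;

		for (i = 0; i < count; i++) {
			/* Waits for, or emits a semaphore to, prior users. */
			ret = i915_gem_object_sync(objs[i], req);
			if (ret)
				return ret;
		}

		return 0;
	}
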
@@ -3345,10 +2958,21 @@ static void i915_gem_object_finish_gtt(struct drm_i915_gem_object *obj)
                                            old_write_domain);
 }
 
+static void __i915_vma_iounmap(struct i915_vma *vma)
+{
+       GEM_BUG_ON(vma->pin_count);
+
+       if (vma->iomap == NULL)
+               return;
+
+       io_mapping_unmap(vma->iomap);
+       vma->iomap = NULL;
+}
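
For context, a hedged sketch of the setup side this teardown pairs with: vma->iomap is normally populated on demand through the existing i915_vma_pin_iomap()/i915_vma_unpin_iomap() helpers (assumed API, not part of this hunk), and the cached mapping then persists until the vma is unbound:

	/* Illustrative only: 'offset' and 'value' stand in for real arguments. */
	static int poke_through_ggtt(struct i915_vma *vma, u32 offset, u32 value)
	{
		void __iomem *ptr;

		ptr = i915_vma_pin_iomap(vma);	/* maps via the GTT aperture */
		if (IS_ERR(ptr))
			return PTR_ERR(ptr);

		writel(value, ptr + offset);
		i915_vma_unpin_iomap(vma);	/* unpins; vma->iomap stays cached */

		return 0;
	}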
+
 static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
 {
        struct drm_i915_gem_object *obj = vma->obj;
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        int ret;
 
        if (list_empty(&vma->obj_link))
@@ -3377,6 +3001,8 @@ static int __i915_vma_unbind(struct i915_vma *vma, bool wait)
                ret = i915_gem_object_put_fence(obj);
                if (ret)
                        return ret;
+
+               __i915_vma_iounmap(vma);
        }
 
        trace_i915_vma_unbind(vma);
@@ -3422,26 +3048,16 @@ int __i915_vma_unbind_no_wait(struct i915_vma *vma)
        return __i915_vma_unbind(vma, false);
 }
 
-int i915_gpu_idle(struct drm_device *dev)
+int i915_gem_wait_for_idle(struct drm_i915_private *dev_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
        struct intel_engine_cs *engine;
        int ret;
 
-       /* Flush everything onto the inactive list. */
-       for_each_engine(engine, dev_priv) {
-               if (!i915.enable_execlists) {
-                       struct drm_i915_gem_request *req;
-
-                       req = i915_gem_request_alloc(engine, NULL);
-                       if (IS_ERR(req))
-                               return PTR_ERR(req);
+       lockdep_assert_held(&dev_priv->drm.struct_mutex);
 
-                       ret = i915_switch_context(req);
-                       i915_add_request_no_flush(req);
-                       if (ret)
-                               return ret;
-               }
+       for_each_engine(engine, dev_priv) {
+               if (engine->last_context == NULL)
+                       continue;
 
                ret = intel_engine_idle(engine);
                if (ret)
@@ -3488,6 +3104,11 @@ static bool i915_gem_valid_gtt_space(struct i915_vma *vma,
 /**
  * Finds free space in the GTT aperture and binds the object or a view of it
  * there.
+ * @obj: object to bind
+ * @vm: address space to bind into
+ * @ggtt_view: global gtt view if applicable
+ * @alignment: requested alignment
+ * @flags: mask of PIN_* flags to use
  */
 static struct i915_vma *
 i915_gem_object_bind_to_vm(struct drm_i915_gem_object *obj,
@@ -3731,7 +3352,7 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
                return;
 
        if (i915_gem_clflush_object(obj, obj->pin_display))
-               i915_gem_chipset_flush(obj->base.dev);
+               i915_gem_chipset_flush(to_i915(obj->base.dev));
 
        old_write_domain = obj->base.write_domain;
        obj->base.write_domain = 0;
@@ -3745,6 +3366,8 @@ i915_gem_object_flush_cpu_write_domain(struct drm_i915_gem_object *obj)
 
 /**
  * Moves a single object to the GTT read, and possibly write domain.
+ * @obj: object to act on
+ * @write: ask for write access or read only
  *
  * This function returns when the move is complete, including waiting on
  * flushes to occur.
@@ -3759,13 +3382,13 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
        struct i915_vma *vma;
        int ret;
 
-       if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
-               return 0;
-
        ret = i915_gem_object_wait_rendering(obj, !write);
        if (ret)
                return ret;
 
+       if (obj->base.write_domain == I915_GEM_DOMAIN_GTT)
+               return 0;
+
        /* Flush and acquire obj->pages so that we are coherent through
         * direct access in memory with previous cached writes through
         * shmemfs and that our cache domain tracking remains valid.
@@ -3816,6 +3439,8 @@ i915_gem_object_set_to_gtt_domain(struct drm_i915_gem_object *obj, bool write)
 
 /**
  * Changes the cache-level of an object across all VMA.
+ * @obj: object to act on
+ * @cache_level: new cache level to set for the object
  *
  * After this function returns, the object will be in the new cache-level
  * across all GTT and the contents of the backing storage will be coherent,
@@ -3925,11 +3550,9 @@ out:
         * object is now coherent at its new cache level (with respect
         * to the access domain).
         */
-       if (obj->cache_dirty &&
-           obj->base.write_domain != I915_GEM_DOMAIN_CPU &&
-           cpu_write_needs_clflush(obj)) {
+       if (obj->cache_dirty && cpu_write_needs_clflush(obj)) {
                if (i915_gem_clflush_object(obj, true))
-                       i915_gem_chipset_flush(obj->base.dev);
+                       i915_gem_chipset_flush(to_i915(obj->base.dev));
        }
 
        return 0;
@@ -3941,8 +3564,8 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
        struct drm_i915_gem_caching *args = data;
        struct drm_i915_gem_object *obj;
 
-       obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
-       if (&obj->base == NULL)
+       obj = i915_gem_object_lookup(file, args->handle);
+       if (!obj)
                return -ENOENT;
 
        switch (obj->cache_level) {
@@ -3960,14 +3583,14 @@ int i915_gem_get_caching_ioctl(struct drm_device *dev, void *data,
                break;
        }
 
-       drm_gem_object_unreference_unlocked(&obj->base);
+       i915_gem_object_put_unlocked(obj);
        return 0;
 }
 
 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_caching *args = data;
        struct drm_i915_gem_object *obj;
        enum i915_cache_level level;
@@ -4002,15 +3625,15 @@ int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
        if (ret)
                goto rpm_put;
 
-       obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
-       if (&obj->base == NULL) {
+       obj = i915_gem_object_lookup(file, args->handle);
+       if (!obj) {
                ret = -ENOENT;
                goto unlock;
        }
 
        ret = i915_gem_object_set_cache_level(obj, level);
 
-       drm_gem_object_unreference(&obj->base);
+       i915_gem_object_put(obj);
 unlock:
        mutex_unlock(&dev->struct_mutex);
 rpm_put:
@@ -4097,6 +3720,8 @@ i915_gem_object_unpin_from_display_plane(struct drm_i915_gem_object *obj,
 
 /**
  * Moves a single object to the CPU read, and possibly write domain.
+ * @obj: object to act on
+ * @write: requesting write or read-only access
  *
  * This function returns when the move is complete, including waiting on
  * flushes to occur.
@@ -4107,13 +3732,13 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
        uint32_t old_write_domain, old_read_domains;
        int ret;
 
-       if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
-               return 0;
-
        ret = i915_gem_object_wait_rendering(obj, !write);
        if (ret)
                return ret;
 
+       if (obj->base.write_domain == I915_GEM_DOMAIN_CPU)
+               return 0;
+
        i915_gem_object_flush_gtt_write_domain(obj);
 
        old_write_domain = obj->base.write_domain;
@@ -4159,7 +3784,7 @@ i915_gem_object_set_to_cpu_domain(struct drm_i915_gem_object *obj, bool write)
 static int
 i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_file_private *file_priv = file->driver_priv;
        unsigned long recent_enough = jiffies - DRM_I915_THROTTLE_JIFFIES;
        struct drm_i915_gem_request *request, *target = NULL;
@@ -4188,17 +3813,14 @@ i915_gem_ring_throttle(struct drm_device *dev, struct drm_file *file)
                target = request;
        }
        if (target)
-               i915_gem_request_reference(target);
+               i915_gem_request_get(target);
        spin_unlock(&file_priv->mm.lock);
 
        if (target == NULL)
                return 0;
 
        ret = __i915_wait_request(target, true, NULL, NULL);
-       if (ret == 0)
-               queue_delayed_work(dev_priv->wq, &dev_priv->mm.retire_work, 0);
-
-       i915_gem_request_unreference__unlocked(target);
+       i915_gem_request_put(target);
 
        return ret;
 }
@@ -4256,7 +3878,7 @@ i915_gem_object_do_pin(struct drm_i915_gem_object *obj,
                       uint32_t alignment,
                       uint64_t flags)
 {
-       struct drm_i915_private *dev_priv = obj->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        struct i915_vma *vma;
        unsigned bound;
        int ret;
@@ -4372,8 +3994,8 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
        if (ret)
                return ret;
 
-       obj = to_intel_bo(drm_gem_object_lookup(file, args->handle));
-       if (&obj->base == NULL) {
+       obj = i915_gem_object_lookup(file, args->handle);
+       if (!obj) {
                ret = -ENOENT;
                goto unlock;
        }
@@ -4403,7 +4025,7 @@ i915_gem_busy_ioctl(struct drm_device *dev, void *data,
        }
 
 unref:
-       drm_gem_object_unreference(&obj->base);
+       i915_gem_object_put(obj);
 unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
@@ -4420,7 +4042,7 @@ int
 i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct drm_i915_gem_madvise *args = data;
        struct drm_i915_gem_object *obj;
        int ret;
@@ -4437,8 +4059,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
        if (ret)
                return ret;
 
-       obj = to_intel_bo(drm_gem_object_lookup(file_priv, args->handle));
-       if (&obj->base == NULL) {
+       obj = i915_gem_object_lookup(file_priv, args->handle);
+       if (!obj) {
                ret = -ENOENT;
                goto unlock;
        }
@@ -4467,7 +4089,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
        args->retained = obj->madv != __I915_MADV_PURGED;
 
 out:
-       drm_gem_object_unreference(&obj->base);
+       i915_gem_object_put(obj);
 unlock:
        mutex_unlock(&dev->struct_mutex);
        return ret;
@@ -4490,7 +4112,7 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
        obj->fence_reg = I915_FENCE_REG_NONE;
        obj->madv = I915_MADV_WILLNEED;
 
-       i915_gem_info_add_obj(obj->base.dev->dev_private, obj->base.size);
+       i915_gem_info_add_obj(to_i915(obj->base.dev), obj->base.size);
 }
 
 static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
@@ -4499,21 +4121,21 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
        .put_pages = i915_gem_object_put_pages_gtt,
 };
 
-struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
+struct drm_i915_gem_object *i915_gem_object_create(struct drm_device *dev,
                                                  size_t size)
 {
        struct drm_i915_gem_object *obj;
        struct address_space *mapping;
        gfp_t mask;
+       int ret;
 
        obj = i915_gem_object_alloc(dev);
        if (obj == NULL)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
-       if (drm_gem_object_init(dev, &obj->base, size) != 0) {
-               i915_gem_object_free(obj);
-               return NULL;
-       }
+       ret = drm_gem_object_init(dev, &obj->base, size);
+       if (ret)
+               goto fail;
 
        mask = GFP_HIGHUSER | __GFP_RECLAIMABLE;
        if (IS_CRESTLINE(dev) || IS_BROADWATER(dev)) {
@@ -4550,6 +4172,11 @@ struct drm_i915_gem_object *i915_gem_alloc_object(struct drm_device *dev,
        trace_i915_gem_object_create(obj);
 
        return obj;
+
+fail:
+       i915_gem_object_free(obj);
+
+       return ERR_PTR(ret);
 }
 
 static bool discard_backing_storage(struct drm_i915_gem_object *obj)
@@ -4580,7 +4207,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
 {
        struct drm_i915_gem_object *obj = to_intel_bo(gem_obj);
        struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct i915_vma *vma, *next;
 
        intel_runtime_pm_get(dev_priv);
@@ -4591,7 +4218,7 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
                int ret;
 
                vma->pin_count = 0;
-               ret = i915_vma_unbind(vma);
+               ret = __i915_vma_unbind_no_wait(vma);
                if (WARN_ON(ret == -ERESTARTSYS)) {
                        bool was_interruptible;
 
@@ -4621,7 +4248,6 @@ void i915_gem_free_object(struct drm_gem_object *gem_obj)
        if (discard_backing_storage(obj))
                obj->madv = I915_MADV_DONTNEED;
        i915_gem_object_put_pages(obj);
-       i915_gem_object_free_mmap_offset(obj);
 
        BUG_ON(obj->pages);
 
@@ -4655,16 +4281,12 @@ struct i915_vma *i915_gem_obj_to_vma(struct drm_i915_gem_object *obj,
 struct i915_vma *i915_gem_obj_to_ggtt_view(struct drm_i915_gem_object *obj,
                                           const struct i915_ggtt_view *view)
 {
-       struct drm_device *dev = obj->base.dev;
-       struct drm_i915_private *dev_priv = to_i915(dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct i915_vma *vma;
 
-       BUG_ON(!view);
+       GEM_BUG_ON(!view);
 
        list_for_each_entry(vma, &obj->vma_list, obj_link)
-               if (vma->vm == &ggtt->base &&
-                   i915_ggtt_view_equal(&vma->ggtt_view, view))
+               if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
                        return vma;
        return NULL;
 }
@@ -4688,7 +4310,7 @@ void i915_gem_vma_destroy(struct i915_vma *vma)
 static void
 i915_gem_stop_engines(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
 
        for_each_engine(engine, dev_priv)
@@ -4698,27 +4320,48 @@ i915_gem_stop_engines(struct drm_device *dev)
 int
 i915_gem_suspend(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret = 0;
 
+       intel_suspend_gt_powersave(dev_priv);
+
        mutex_lock(&dev->struct_mutex);
-       ret = i915_gpu_idle(dev);
+
+       /* We have to flush all the executing contexts to main memory so
+        * that they can be saved in the hibernation image. To ensure the last
+        * context image is coherent, we have to switch away from it. That
+        * leaves the dev_priv->kernel_context still active when
+        * we actually suspend, and its image in memory may not match the GPU
+        * state. Fortunately, the kernel_context is disposable and we do
+        * not rely on its state.
+        */
+       ret = i915_gem_switch_to_kernel_context(dev_priv);
        if (ret)
                goto err;
 
-       i915_gem_retire_requests(dev);
+       ret = i915_gem_wait_for_idle(dev_priv);
+       if (ret)
+               goto err;
 
+       i915_gem_retire_requests(dev_priv);
+
+       /* Note that rather than stopping the engines, all we have to do
+        * is assert that every RING_HEAD == RING_TAIL (all execution complete)
+        * and similar for all logical context images (to ensure they are
+        * all ready for hibernation).
+        */
        i915_gem_stop_engines(dev);
+       i915_gem_context_lost(dev_priv);
        mutex_unlock(&dev->struct_mutex);
 
        cancel_delayed_work_sync(&dev_priv->gpu_error.hangcheck_work);
-       cancel_delayed_work_sync(&dev_priv->mm.retire_work);
-       flush_delayed_work(&dev_priv->mm.idle_work);
+       cancel_delayed_work_sync(&dev_priv->gt.retire_work);
+       flush_delayed_work(&dev_priv->gt.idle_work);
 
        /* Assert that we successfully flushed all the work and
         * reset the GPU back to its idle, low power state.
         */
-       WARN_ON(dev_priv->mm.busy);
+       WARN_ON(dev_priv->gt.awake);
 
        return 0;
 
@@ -4727,40 +4370,26 @@ err:
        return ret;
 }
 
-int i915_gem_l3_remap(struct drm_i915_gem_request *req, int slice)
+void i915_gem_resume(struct drm_device *dev)
 {
-       struct intel_engine_cs *engine = req->engine;
-       struct drm_device *dev = engine->dev;
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       u32 *remap_info = dev_priv->l3_parity.remap_info[slice];
-       int i, ret;
-
-       if (!HAS_L3_DPF(dev) || !remap_info)
-               return 0;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
-       ret = intel_ring_begin(req, GEN7_L3LOG_SIZE / 4 * 3);
-       if (ret)
-               return ret;
+       mutex_lock(&dev->struct_mutex);
+       i915_gem_restore_gtt_mappings(dev);
 
-       /*
-        * Note: We do not worry about the concurrent register cacheline hang
-        * here because no other code should access these registers other than
-        * at initialization time.
+       /* As we didn't flush the kernel context before suspend, we cannot
+        * guarantee that the context image is complete. So let's just reset
+        * it and start again.
         */
-       for (i = 0; i < GEN7_L3LOG_SIZE / 4; i++) {
-               intel_ring_emit(engine, MI_LOAD_REGISTER_IMM(1));
-               intel_ring_emit_reg(engine, GEN7_L3LOG(slice, i));
-               intel_ring_emit(engine, remap_info[i]);
-       }
-
-       intel_ring_advance(engine);
+       if (i915.enable_execlists)
+               intel_lr_context_reset(dev_priv, dev_priv->kernel_context);
 
-       return ret;
+       mutex_unlock(&dev->struct_mutex);
 }
 
 void i915_gem_init_swizzling(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        if (INTEL_INFO(dev)->gen < 5 ||
            dev_priv->mm.bit_6_swizzle_x == I915_BIT_6_SWIZZLE_NONE)
@@ -4785,7 +4414,7 @@ void i915_gem_init_swizzling(struct drm_device *dev)
 
 static void init_unused_ring(struct drm_device *dev, u32 base)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
 
        I915_WRITE(RING_CTL(base), 0);
        I915_WRITE(RING_HEAD(base), 0);
@@ -4810,59 +4439,12 @@ static void init_unused_rings(struct drm_device *dev)
        }
 }
 
-int i915_gem_init_engines(struct drm_device *dev)
-{
-       struct drm_i915_private *dev_priv = dev->dev_private;
-       int ret;
-
-       ret = intel_init_render_ring_buffer(dev);
-       if (ret)
-               return ret;
-
-       if (HAS_BSD(dev)) {
-               ret = intel_init_bsd_ring_buffer(dev);
-               if (ret)
-                       goto cleanup_render_ring;
-       }
-
-       if (HAS_BLT(dev)) {
-               ret = intel_init_blt_ring_buffer(dev);
-               if (ret)
-                       goto cleanup_bsd_ring;
-       }
-
-       if (HAS_VEBOX(dev)) {
-               ret = intel_init_vebox_ring_buffer(dev);
-               if (ret)
-                       goto cleanup_blt_ring;
-       }
-
-       if (HAS_BSD2(dev)) {
-               ret = intel_init_bsd2_ring_buffer(dev);
-               if (ret)
-                       goto cleanup_vebox_ring;
-       }
-
-       return 0;
-
-cleanup_vebox_ring:
-       intel_cleanup_engine(&dev_priv->engine[VECS]);
-cleanup_blt_ring:
-       intel_cleanup_engine(&dev_priv->engine[BCS]);
-cleanup_bsd_ring:
-       intel_cleanup_engine(&dev_priv->engine[VCS]);
-cleanup_render_ring:
-       intel_cleanup_engine(&dev_priv->engine[RCS]);
-
-       return ret;
-}
-
 int
 i915_gem_init_hw(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
-       int ret, j;
+       int ret;
 
        /* Double layer security blanket, see i915_gem_init() */
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
@@ -4914,82 +4496,49 @@ i915_gem_init_hw(struct drm_device *dev)
        intel_mocs_init_l3cc_table(dev);
 
        /* We can't enable contexts until all firmware is loaded */
-       if (HAS_GUC_UCODE(dev)) {
-               ret = intel_guc_ucode_load(dev);
-               if (ret) {
-                       DRM_ERROR("Failed to initialize GuC, error %d\n", ret);
-                       ret = -EIO;
-                       goto out;
-               }
-       }
-
-       /*
-        * Increment the next seqno by 0x100 so we have a visible break
-        * on re-initialisation
-        */
-       ret = i915_gem_set_seqno(dev, dev_priv->next_seqno+0x100);
+       ret = intel_guc_setup(dev);
        if (ret)
                goto out;
 
-       /* Now it is safe to go back round and do everything else: */
-       for_each_engine(engine, dev_priv) {
-               struct drm_i915_gem_request *req;
-
-               req = i915_gem_request_alloc(engine, NULL);
-               if (IS_ERR(req)) {
-                       ret = PTR_ERR(req);
-                       break;
-               }
+out:
+       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
+       return ret;
+}
 
-               if (engine->id == RCS) {
-                       for (j = 0; j < NUM_L3_SLICES(dev); j++) {
-                               ret = i915_gem_l3_remap(req, j);
-                               if (ret)
-                                       goto err_request;
-                       }
-               }
+bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
+{
+       if (INTEL_INFO(dev_priv)->gen < 6)
+               return false;
 
-               ret = i915_ppgtt_init_ring(req);
-               if (ret)
-                       goto err_request;
+       /* TODO: make semaphores and Execlists play nicely together */
+       if (i915.enable_execlists)
+               return false;
 
-               ret = i915_gem_context_enable(req);
-               if (ret)
-                       goto err_request;
+       if (value >= 0)
+               return value;
 
-err_request:
-               i915_add_request_no_flush(req);
-               if (ret) {
-                       DRM_ERROR("Failed to enable %s, error=%d\n",
-                                 engine->name, ret);
-                       i915_gem_cleanup_engines(dev);
-                       break;
-               }
-       }
+#ifdef CONFIG_INTEL_IOMMU
+       /* Enable semaphores on SNB when IO remapping is off */
+       if (INTEL_INFO(dev_priv)->gen == 6 && intel_iommu_gfx_mapped)
+               return false;
+#endif
 
-out:
-       intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
-       return ret;
+       return true;
 }
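
The expected call site is not shown in this hunk; the assumption is that the driver's early-init path sanitizes the module parameter once so later code can test i915.semaphores directly, roughly:

	i915.semaphores = intel_sanitize_semaphores(dev_priv,
						    i915.semaphores);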
 
 int i915_gem_init(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int ret;
 
-       i915.enable_execlists = intel_sanitize_enable_execlists(dev,
-                       i915.enable_execlists);
-
        mutex_lock(&dev->struct_mutex);
 
        if (!i915.enable_execlists) {
                dev_priv->gt.execbuf_submit = i915_gem_ringbuffer_submission;
-               dev_priv->gt.init_engines = i915_gem_init_engines;
-               dev_priv->gt.cleanup_engine = intel_cleanup_engine;
-               dev_priv->gt.stop_engine = intel_stop_engine;
+               dev_priv->gt.cleanup_engine = intel_engine_cleanup;
+               dev_priv->gt.stop_engine = intel_engine_stop;
        } else {
                dev_priv->gt.execbuf_submit = intel_execlists_submission;
-               dev_priv->gt.init_engines = intel_logical_rings_init;
                dev_priv->gt.cleanup_engine = intel_logical_ring_cleanup;
                dev_priv->gt.stop_engine = intel_logical_ring_stop;
        }
@@ -5002,23 +4551,20 @@ int i915_gem_init(struct drm_device *dev)
         */
        intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
 
-       ret = i915_gem_init_userptr(dev);
-       if (ret)
-               goto out_unlock;
-
+       i915_gem_init_userptr(dev_priv);
        i915_gem_init_ggtt(dev);
 
        ret = i915_gem_context_init(dev);
        if (ret)
                goto out_unlock;
 
-       ret = dev_priv->gt.init_engines(dev);
+       ret = intel_engines_init(dev);
        if (ret)
                goto out_unlock;
 
        ret = i915_gem_init_hw(dev);
        if (ret == -EIO) {
-               /* Allow ring initialisation to fail by marking the GPU as
+               /* Allow engine initialisation to fail by marking the GPU as
                 * wedged. But we only want to do this where the GPU is angry,
                 * for all other failures, such as an allocation failure, bail.
                 */
@@ -5037,19 +4583,11 @@ out_unlock:
 void
 i915_gem_cleanup_engines(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        struct intel_engine_cs *engine;
 
        for_each_engine(engine, dev_priv)
                dev_priv->gt.cleanup_engine(engine);
-
-       if (i915.enable_execlists)
-               /*
-                * Neither the BIOS, ourselves or any other kernel
-                * expects the system to be in execlists mode on startup,
-                * so we need to reset the GPU back to legacy mode.
-                */
-               intel_gpu_reset(dev, ALL_ENGINES);
 }
 
 static void
@@ -5062,7 +4600,7 @@ init_engine_lists(struct intel_engine_cs *engine)
 void
 i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
 {
-       struct drm_device *dev = dev_priv->dev;
+       struct drm_device *dev = &dev_priv->drm;
 
        if (INTEL_INFO(dev_priv)->gen >= 7 && !IS_VALLEYVIEW(dev_priv) &&
            !IS_CHERRYVIEW(dev_priv))
@@ -5073,7 +4611,7 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
        else
                dev_priv->num_fence_regs = 8;
 
-       if (intel_vgpu_active(dev))
+       if (intel_vgpu_active(dev_priv))
                dev_priv->num_fence_regs =
                                I915_READ(vgtif_reg(avail_rs.fence_num));
 
@@ -5086,7 +4624,7 @@ i915_gem_load_init_fences(struct drm_i915_private *dev_priv)
 void
 i915_gem_load_init(struct drm_device *dev)
 {
-       struct drm_i915_private *dev_priv = dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(dev);
        int i;
 
        dev_priv->objects =
@@ -5114,22 +4652,15 @@ i915_gem_load_init(struct drm_device *dev)
                init_engine_lists(&dev_priv->engine[i]);
        for (i = 0; i < I915_MAX_NUM_FENCES; i++)
                INIT_LIST_HEAD(&dev_priv->fence_regs[i].lru_list);
-       INIT_DELAYED_WORK(&dev_priv->mm.retire_work,
+       INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
                          i915_gem_retire_work_handler);
-       INIT_DELAYED_WORK(&dev_priv->mm.idle_work,
+       INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
                          i915_gem_idle_work_handler);
+       init_waitqueue_head(&dev_priv->gpu_error.wait_queue);
        init_waitqueue_head(&dev_priv->gpu_error.reset_queue);
 
        dev_priv->relative_constants_mode = I915_EXEC_CONSTANTS_REL_GENERAL;
 
-       /*
-        * Set initial sequence number for requests.
-        * Using this number allows the wraparound to happen early,
-        * catching any obvious problems.
-        */
-       dev_priv->next_seqno = ((u32)~0 - 0x1100);
-       dev_priv->last_seqno = ((u32)~0 - 0x1101);
-
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
 
        init_waitqueue_head(&dev_priv->pending_flip_queue);
@@ -5148,24 +4679,46 @@ void i915_gem_load_cleanup(struct drm_device *dev)
        kmem_cache_destroy(dev_priv->objects);
 }
 
+int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
+{
+       struct drm_i915_gem_object *obj;
+
+       /* Called just before we write the hibernation image.
+        *
+        * We need to update the domain tracking to reflect that the CPU
+        * will be accessing all the pages both to create the hibernation
+        * image and to restore from it, and so upon restoration those
+        * pages will be in the CPU domain.
+        *
+        * To make sure the hibernation image contains the latest state,
+        * we update that state just before writing out the image.
+        */
+
+       list_for_each_entry(obj, &dev_priv->mm.unbound_list, global_list) {
+               obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+               obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+       }
+
+       list_for_each_entry(obj, &dev_priv->mm.bound_list, global_list) {
+               obj->base.read_domains = I915_GEM_DOMAIN_CPU;
+               obj->base.write_domain = I915_GEM_DOMAIN_CPU;
+       }
+
+       return 0;
+}
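/*
 * A minimal sketch, assuming i915_gem_freeze_late() is meant to be called
 * from the driver's hibernation callbacks in i915_drv.c (that wiring is
 * not part of this file).  The kdev_to_i915() helper name is an
 * assumption used only for illustration.
 */
static int i915_pm_freeze_late(struct device *kdev)
{
        struct drm_i915_private *dev_priv = kdev_to_i915(kdev); /* assumed helper */

        return i915_gem_freeze_late(dev_priv);
}

/* ...and registered in dev_pm_ops, e.g. .freeze_late = i915_pm_freeze_late */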
+
 void i915_gem_release(struct drm_device *dev, struct drm_file *file)
 {
        struct drm_i915_file_private *file_priv = file->driver_priv;
+       struct drm_i915_gem_request *request;
 
        /* Clean up our request list when the client is going away, so that
         * later retire_requests won't dereference our soon-to-be-gone
         * file_priv.
         */
        spin_lock(&file_priv->mm.lock);
-       while (!list_empty(&file_priv->mm.request_list)) {
-               struct drm_i915_gem_request *request;
-
-               request = list_first_entry(&file_priv->mm.request_list,
-                                          struct drm_i915_gem_request,
-                                          client_list);
-               list_del(&request->client_list);
+       list_for_each_entry(request, &file_priv->mm.request_list, client_list)
                request->file_priv = NULL;
-       }
        spin_unlock(&file_priv->mm.lock);
 
        if (!list_empty(&file_priv->rps.link)) {
@@ -5187,14 +4740,14 @@ int i915_gem_open(struct drm_device *dev, struct drm_file *file)
                return -ENOMEM;
 
        file->driver_priv = file_priv;
-       file_priv->dev_priv = dev->dev_private;
+       file_priv->dev_priv = to_i915(dev);
        file_priv->file = file;
        INIT_LIST_HEAD(&file_priv->rps.link);
 
        spin_lock_init(&file_priv->mm.lock);
        INIT_LIST_HEAD(&file_priv->mm.request_list);
 
-       file_priv->bsd_ring = -1;
+       file_priv->bsd_engine = -1;
 
        ret = i915_gem_context_open(dev, file);
        if (ret)
@@ -5233,7 +4786,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
 u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
                        struct i915_address_space *vm)
 {
-       struct drm_i915_private *dev_priv = o->base.dev->dev_private;
+       struct drm_i915_private *dev_priv = to_i915(o->base.dev);
        struct i915_vma *vma;
 
        WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
@@ -5254,13 +4807,10 @@ u64 i915_gem_obj_offset(struct drm_i915_gem_object *o,
 u64 i915_gem_obj_ggtt_offset_view(struct drm_i915_gem_object *o,
                                  const struct i915_ggtt_view *view)
 {
-       struct drm_i915_private *dev_priv = to_i915(o->base.dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct i915_vma *vma;
 
        list_for_each_entry(vma, &o->vma_list, obj_link)
-               if (vma->vm == &ggtt->base &&
-                   i915_ggtt_view_equal(&vma->ggtt_view, view))
+               if (vma->is_ggtt && i915_ggtt_view_equal(&vma->ggtt_view, view))
                        return vma->node.start;
 
        WARN(1, "global vma for this object not found. (view=%u)\n", view->type);
@@ -5286,12 +4836,10 @@ bool i915_gem_obj_bound(struct drm_i915_gem_object *o,
 bool i915_gem_obj_ggtt_bound_view(struct drm_i915_gem_object *o,
                                  const struct i915_ggtt_view *view)
 {
-       struct drm_i915_private *dev_priv = to_i915(o->base.dev);
-       struct i915_ggtt *ggtt = &dev_priv->ggtt;
        struct i915_vma *vma;
 
        list_for_each_entry(vma, &o->vma_list, obj_link)
-               if (vma->vm == &ggtt->base &&
+               if (vma->is_ggtt &&
                    i915_ggtt_view_equal(&vma->ggtt_view, view) &&
                    drm_mm_node_allocated(&vma->node))
                        return true;
@@ -5310,23 +4858,18 @@ bool i915_gem_obj_bound_any(struct drm_i915_gem_object *o)
        return false;
 }
 
-unsigned long i915_gem_obj_size(struct drm_i915_gem_object *o,
-                               struct i915_address_space *vm)
+unsigned long i915_gem_obj_ggtt_size(struct drm_i915_gem_object *o)
 {
-       struct drm_i915_private *dev_priv = o->base.dev->dev_private;
        struct i915_vma *vma;
 
-       WARN_ON(vm == &dev_priv->mm.aliasing_ppgtt->base);
-
-       BUG_ON(list_empty(&o->vma_list));
+       GEM_BUG_ON(list_empty(&o->vma_list));
 
        list_for_each_entry(vma, &o->vma_list, obj_link) {
                if (vma->is_ggtt &&
-                   vma->ggtt_view.type != I915_GGTT_VIEW_NORMAL)
-                       continue;
-               if (vma->vm == vm)
+                   vma->ggtt_view.type == I915_GGTT_VIEW_NORMAL)
                        return vma->node.size;
        }
+
        return 0;
 }
 
@@ -5347,7 +4890,7 @@ i915_gem_object_get_dirty_page(struct drm_i915_gem_object *obj, int n)
        struct page *page;
 
        /* Only default objects have per-page dirty tracking */
-       if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
+       if (WARN_ON(!i915_gem_object_has_struct_page(obj)))
                return NULL;
 
        page = i915_gem_object_get_page(obj, n);
@@ -5365,8 +4908,8 @@ i915_gem_object_create_from_data(struct drm_device *dev,
        size_t bytes;
        int ret;
 
-       obj = i915_gem_alloc_object(dev, round_up(size, PAGE_SIZE));
-       if (IS_ERR_OR_NULL(obj))
+       obj = i915_gem_object_create(dev, round_up(size, PAGE_SIZE));
+       if (IS_ERR(obj))
                return obj;
 
        ret = i915_gem_object_set_to_cpu_domain(obj, true);
@@ -5392,6 +4935,6 @@ i915_gem_object_create_from_data(struct drm_device *dev,
        return obj;
 
 fail:
-       drm_gem_object_unreference(&obj->base);
+       i915_gem_object_put(obj);
        return ERR_PTR(ret);
 }
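/*
 * A minimal caller sketch, assuming only what this hunk shows: object
 * creation now reports failure via ERR_PTR() rather than NULL, so
 * callers pair IS_ERR() with PTR_ERR() and drop the IS_ERR_OR_NULL()
 * check.  The function below is illustrative, not part of the patch.
 */
static int example_create_and_release(struct drm_device *dev)
{
        struct drm_i915_gem_object *obj;

        obj = i915_gem_object_create(dev, PAGE_SIZE);
        if (IS_ERR(obj))
                return PTR_ERR(obj);

        /* use the object here, then drop the reference */

        i915_gem_object_put(obj);
        return 0;
}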