Merge tag 'drm-for-v4.15' of git://people.freedesktop.org/~airlied/linux
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index dc1faa49687d148876b1089e517af3e66e110d6a..3a140eedfc83079b734c39cea63b85932d002159 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -35,6 +35,7 @@
 #include "intel_drv.h"
 #include "intel_frontbuffer.h"
 #include "intel_mocs.h"
+#include "i915_gemfs.h"
 #include <linux/dma-fence-array.h>
 #include <linux/kthread.h>
 #include <linux/reservation.h>
@@ -55,7 +56,7 @@ static bool cpu_write_needs_clflush(struct drm_i915_gem_object *obj)
        if (!(obj->cache_coherent & I915_BO_CACHE_COHERENT_FOR_WRITE))
                return true;
 
-       return obj->pin_display;
+       return obj->pin_global; /* currently in use by HW, keep flushed */
 }
 
 static int
@@ -161,8 +162,7 @@ i915_gem_get_aperture_ioctl(struct drm_device *dev, void *data,
        return 0;
 }
 
-static struct sg_table *
-i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 {
        struct address_space *mapping = obj->base.filp->f_mapping;
        drm_dma_handle_t *phys;
@@ -170,19 +170,20 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
        struct scatterlist *sg;
        char *vaddr;
        int i;
+       int err;
 
        if (WARN_ON(i915_gem_object_needs_bit17_swizzle(obj)))
-               return ERR_PTR(-EINVAL);
+               return -EINVAL;
 
        /* Always aligning to the object size allows a single allocation
         * to handle all possible callers, and given typical object sizes,
         * the alignment of the buddy allocation will naturally match.
         */
        phys = drm_pci_alloc(obj->base.dev,
-                            obj->base.size,
+                            roundup_pow_of_two(obj->base.size),
                             roundup_pow_of_two(obj->base.size));
        if (!phys)
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
 
        vaddr = phys->vaddr;
        for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
@@ -191,7 +192,7 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 
                page = shmem_read_mapping_page(mapping, i);
                if (IS_ERR(page)) {
-                       st = ERR_CAST(page);
+                       err = PTR_ERR(page);
                        goto err_phys;
                }
 
@@ -208,13 +209,13 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
 
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (!st) {
-               st = ERR_PTR(-ENOMEM);
+               err = -ENOMEM;
                goto err_phys;
        }
 
        if (sg_alloc_table(st, 1, GFP_KERNEL)) {
                kfree(st);
-               st = ERR_PTR(-ENOMEM);
+               err = -ENOMEM;
                goto err_phys;
        }
 
@@ -226,11 +227,15 @@ i915_gem_object_get_pages_phys(struct drm_i915_gem_object *obj)
        sg_dma_len(sg) = obj->base.size;
 
        obj->phys_handle = phys;
-       return st;
+
+       __i915_gem_object_set_pages(obj, st, sg->length);
+
+       return 0;
 
 err_phys:
        drm_pci_free(obj->base.dev, phys);
-       return st;
+
+       return err;
 }
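
The hunk above now rounds the drm_pci_alloc() size up to a power of two so that it matches the requested alignment. A minimal userspace sketch of that rounding, using a stand-in helper (not the kernel's roundup_pow_of_two()) and a made-up 24 KiB object size:

#include <stdio.h>

/* Userspace stand-in for the kernel's roundup_pow_of_two(). */
static unsigned long round_up_pow2(unsigned long x)
{
        unsigned long r = 1;

        while (r < x)
                r <<= 1;
        return r;
}

int main(void)
{
        unsigned long obj_size = 24 << 10;      /* assumed 24 KiB object */
        unsigned long alloc_size = round_up_pow2(obj_size);

        /* Size and alignment are both rounded up, so the single
         * allocation is naturally aligned to itself (32 KiB here). */
        printf("object %lu -> allocate %lu, align %lu\n",
               obj_size, alloc_size, round_up_pow2(obj_size));
        return 0;
}
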
 
 static void __start_cpu_write(struct drm_i915_gem_object *obj)
@@ -353,7 +358,7 @@ static long
 i915_gem_object_wait_fence(struct dma_fence *fence,
                           unsigned int flags,
                           long timeout,
-                          struct intel_rps_client *rps)
+                          struct intel_rps_client *rps_client)
 {
        struct drm_i915_gem_request *rq;
 
@@ -386,11 +391,11 @@ i915_gem_object_wait_fence(struct dma_fence *fence,
         * forcing the clocks too high for the whole system, we only allow
         * each client to waitboost once in a busy period.
         */
-       if (rps) {
+       if (rps_client) {
                if (INTEL_GEN(rq->i915) >= 6)
-                       gen6_rps_boost(rq, rps);
+                       gen6_rps_boost(rq, rps_client);
                else
-                       rps = NULL;
+                       rps_client = NULL;
        }
 
        timeout = i915_wait_request(rq, flags, timeout);
@@ -406,7 +411,7 @@ static long
 i915_gem_object_wait_reservation(struct reservation_object *resv,
                                 unsigned int flags,
                                 long timeout,
-                                struct intel_rps_client *rps)
+                                struct intel_rps_client *rps_client)
 {
        unsigned int seq = __read_seqcount_begin(&resv->seq);
        struct dma_fence *excl;
@@ -425,7 +430,7 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
                for (i = 0; i < count; i++) {
                        timeout = i915_gem_object_wait_fence(shared[i],
                                                             flags, timeout,
-                                                            rps);
+                                                            rps_client);
                        if (timeout < 0)
                                break;
 
@@ -442,7 +447,8 @@ i915_gem_object_wait_reservation(struct reservation_object *resv,
        }
 
        if (excl && timeout >= 0) {
-               timeout = i915_gem_object_wait_fence(excl, flags, timeout, rps);
+               timeout = i915_gem_object_wait_fence(excl, flags, timeout,
+                                                    rps_client);
                prune_fences = timeout >= 0;
        }
 
@@ -538,7 +544,7 @@ int
 i915_gem_object_wait(struct drm_i915_gem_object *obj,
                     unsigned int flags,
                     long timeout,
-                    struct intel_rps_client *rps)
+                    struct intel_rps_client *rps_client)
 {
        might_sleep();
 #if IS_ENABLED(CONFIG_LOCKDEP)
@@ -550,7 +556,7 @@ i915_gem_object_wait(struct drm_i915_gem_object *obj,
 
        timeout = i915_gem_object_wait_reservation(obj->resv,
                                                   flags, timeout,
-                                                  rps);
+                                                  rps_client);
        return timeout < 0 ? timeout : 0;
 }
 
@@ -558,7 +564,7 @@ static struct intel_rps_client *to_rps_client(struct drm_file *file)
 {
        struct drm_i915_file_private *fpriv = file->driver_priv;
 
-       return &fpriv->rps;
+       return &fpriv->rps_client;
 }
 
 static int
@@ -694,10 +700,10 @@ flush_write_domain(struct drm_i915_gem_object *obj, unsigned int flush_domains)
 
        switch (obj->base.write_domain) {
        case I915_GEM_DOMAIN_GTT:
-               if (INTEL_GEN(dev_priv) >= 6 && !HAS_LLC(dev_priv)) {
+               if (!HAS_LLC(dev_priv)) {
                        intel_runtime_pm_get(dev_priv);
                        spin_lock_irq(&dev_priv->uncore.lock);
-                       POSTING_READ_FW(RING_ACTHD(dev_priv->engine[RCS]->mmio_base));
+                       POSTING_READ_FW(RING_HEAD(dev_priv->engine[RCS]->mmio_base));
                        spin_unlock_irq(&dev_priv->uncore.lock);
                        intel_runtime_pm_put(dev_priv);
                }
@@ -1013,17 +1019,20 @@ gtt_user_read(struct io_mapping *mapping,
              loff_t base, int offset,
              char __user *user_data, int length)
 {
-       void *vaddr;
+       void __iomem *vaddr;
        unsigned long unwritten;
 
        /* We can use the cpu mem copy function because this is X86. */
-       vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
-       unwritten = __copy_to_user_inatomic(user_data, vaddr + offset, length);
+       vaddr = io_mapping_map_atomic_wc(mapping, base);
+       unwritten = __copy_to_user_inatomic(user_data,
+                                           (void __force *)vaddr + offset,
+                                           length);
        io_mapping_unmap_atomic(vaddr);
        if (unwritten) {
-               vaddr = (void __force *)
-                       io_mapping_map_wc(mapping, base, PAGE_SIZE);
-               unwritten = copy_to_user(user_data, vaddr + offset, length);
+               vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
+               unwritten = copy_to_user(user_data,
+                                        (void __force *)vaddr + offset,
+                                        length);
                io_mapping_unmap(vaddr);
        }
        return unwritten;
@@ -1047,7 +1056,9 @@ i915_gem_gtt_pread(struct drm_i915_gem_object *obj,
 
        intel_runtime_pm_get(i915);
        vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
-                                      PIN_MAPPABLE | PIN_NONBLOCK);
+                                      PIN_MAPPABLE |
+                                      PIN_NONFAULT |
+                                      PIN_NONBLOCK);
        if (!IS_ERR(vma)) {
                node.start = i915_ggtt_offset(vma);
                node.allocated = false;
@@ -1189,18 +1200,18 @@ ggtt_write(struct io_mapping *mapping,
           loff_t base, int offset,
           char __user *user_data, int length)
 {
-       void *vaddr;
+       void __iomem *vaddr;
        unsigned long unwritten;
 
        /* We can use the cpu mem copy function because this is X86. */
-       vaddr = (void __force *)io_mapping_map_atomic_wc(mapping, base);
-       unwritten = __copy_from_user_inatomic_nocache(vaddr + offset,
+       vaddr = io_mapping_map_atomic_wc(mapping, base);
+       unwritten = __copy_from_user_inatomic_nocache((void __force *)vaddr + offset,
                                                      user_data, length);
        io_mapping_unmap_atomic(vaddr);
        if (unwritten) {
-               vaddr = (void __force *)
-                       io_mapping_map_wc(mapping, base, PAGE_SIZE);
-               unwritten = copy_from_user(vaddr + offset, user_data, length);
+               vaddr = io_mapping_map_wc(mapping, base, PAGE_SIZE);
+               unwritten = copy_from_user((void __force *)vaddr + offset,
+                                          user_data, length);
                io_mapping_unmap(vaddr);
        }
 
@@ -1229,9 +1240,27 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
        if (ret)
                return ret;
 
-       intel_runtime_pm_get(i915);
+       if (i915_gem_object_has_struct_page(obj)) {
+               /*
+                * Avoid waking the device up if we can fall back, as
+                * waking/resuming is very slow (worst-case 10-100 ms
+                * depending on PCI sleeps and our own resume time).
+                * This easily dwarfs any performance advantage from
+                * using the cache bypass of indirect GGTT access.
+                */
+               if (!intel_runtime_pm_get_if_in_use(i915)) {
+                       ret = -EFAULT;
+                       goto out_unlock;
+               }
+       } else {
+               /* No backing pages, no fallback, we must force GGTT access */
+               intel_runtime_pm_get(i915);
+       }
+
        vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0,
-                                      PIN_MAPPABLE | PIN_NONBLOCK);
+                                      PIN_MAPPABLE |
+                                      PIN_NONFAULT |
+                                      PIN_NONBLOCK);
        if (!IS_ERR(vma)) {
                node.start = i915_ggtt_offset(vma);
                node.allocated = false;
@@ -1244,7 +1273,7 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_gem_object *obj,
        if (IS_ERR(vma)) {
                ret = insert_mappable_node(ggtt, &node, PAGE_SIZE);
                if (ret)
-                       goto out_unlock;
+                       goto out_rpm;
                GEM_BUG_ON(!node.allocated);
        }
 
@@ -1307,8 +1336,9 @@ out_unpin:
        } else {
                i915_vma_unpin(vma);
        }
-out_unlock:
+out_rpm:
        intel_runtime_pm_put(i915);
+out_unlock:
        mutex_unlock(&i915->drm.struct_mutex);
        return ret;
 }
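
i915_gem_gtt_pwrite_fast() now only forces a device wakeup when there is no CPU fallback, via intel_runtime_pm_get_if_in_use(). A simplified userspace model of those two wakeref operations, assuming a plain atomic counter rather than the real runtime-PM machinery:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int wakeref;              /* 0 == device suspended */

/* Take a reference only if the device is already awake; never wake it. */
static bool pm_get_if_in_use(void)
{
        int old = atomic_load(&wakeref);

        do {
                if (old == 0)
                        return false;   /* suspended: caller must fall back */
        } while (!atomic_compare_exchange_weak(&wakeref, &old, old + 1));

        return true;
}

/* Take a reference unconditionally, waking the device if needed. */
static void pm_get(void)
{
        if (atomic_fetch_add(&wakeref, 1) == 0)
                puts("resuming device (slow: tens of milliseconds)");
}

static void pm_put(void)
{
        if (atomic_fetch_sub(&wakeref, 1) == 1)
                puts("last reference dropped, device may suspend");
}

int main(void)
{
        if (!pm_get_if_in_use())
                puts("device asleep: take the CPU fallback instead of waking it");

        pm_get();               /* no fallback available: force the wakeup */
        pm_put();
        return 0;
}
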
@@ -1524,6 +1554,8 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
        struct list_head *list;
        struct i915_vma *vma;
 
+       GEM_BUG_ON(!i915_gem_object_has_pinned_pages(obj));
+
        list_for_each_entry(vma, &obj->vma_list, obj_link) {
                if (!i915_vma_is_ggtt(vma))
                        break;
@@ -1538,8 +1570,10 @@ static void i915_gem_object_bump_inactive_ggtt(struct drm_i915_gem_object *obj)
        }
 
        i915 = to_i915(obj->base.dev);
+       spin_lock(&i915->mm.obj_lock);
        list = obj->bind_count ? &i915->mm.bound_list : &i915->mm.unbound_list;
-       list_move_tail(&obj->global_link, list);
+       list_move_tail(&obj->mm.link, list);
+       spin_unlock(&i915->mm.obj_lock);
 }
 
 /**
@@ -1902,22 +1936,27 @@ int i915_gem_fault(struct vm_fault *vmf)
        if (ret)
                goto err_unpin;
 
-       ret = i915_vma_get_fence(vma);
+       ret = i915_vma_pin_fence(vma);
        if (ret)
                goto err_unpin;
 
-       /* Mark as being mmapped into userspace for later revocation */
-       assert_rpm_wakelock_held(dev_priv);
-       if (list_empty(&obj->userfault_link))
-               list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
-
        /* Finally, remap it using the new GTT offset */
        ret = remap_io_mapping(area,
                               area->vm_start + (vma->ggtt_view.partial.offset << PAGE_SHIFT),
                               (ggtt->mappable_base + vma->node.start) >> PAGE_SHIFT,
                               min_t(u64, vma->size, area->vm_end - area->vm_start),
                               &ggtt->mappable);
+       if (ret)
+               goto err_fence;
 
+       /* Mark as being mmapped into userspace for later revocation */
+       assert_rpm_wakelock_held(dev_priv);
+       if (!i915_vma_set_userfault(vma) && !obj->userfault_count++)
+               list_add(&obj->userfault_link, &dev_priv->mm.userfault_list);
+       GEM_BUG_ON(!obj->userfault_count);
+
+err_fence:
+       i915_vma_unpin_fence(vma);
 err_unpin:
        __i915_vma_unpin(vma);
 err_unlock:
@@ -1969,6 +2008,25 @@ err:
        return ret;
 }
 
+static void __i915_gem_object_release_mmap(struct drm_i915_gem_object *obj)
+{
+       struct i915_vma *vma;
+
+       GEM_BUG_ON(!obj->userfault_count);
+
+       obj->userfault_count = 0;
+       list_del(&obj->userfault_link);
+       drm_vma_node_unmap(&obj->base.vma_node,
+                          obj->base.dev->anon_inode->i_mapping);
+
+       list_for_each_entry(vma, &obj->vma_list, obj_link) {
+               if (!i915_vma_is_ggtt(vma))
+                       break;
+
+               i915_vma_unset_userfault(vma);
+       }
+}
+
 /**
  * i915_gem_release_mmap - remove physical page mappings
  * @obj: obj in question
@@ -1999,12 +2057,10 @@ i915_gem_release_mmap(struct drm_i915_gem_object *obj)
        lockdep_assert_held(&i915->drm.struct_mutex);
        intel_runtime_pm_get(i915);
 
-       if (list_empty(&obj->userfault_link))
+       if (!obj->userfault_count)
                goto out;
 
-       list_del_init(&obj->userfault_link);
-       drm_vma_node_unmap(&obj->base.vma_node,
-                          obj->base.dev->anon_inode->i_mapping);
+       __i915_gem_object_release_mmap(obj);
 
        /* Ensure that the CPU's PTE are revoked and there are not outstanding
         * memory transactions from userspace before we return. The TLB
@@ -2032,11 +2088,8 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
         */
 
        list_for_each_entry_safe(obj, on,
-                                &dev_priv->mm.userfault_list, userfault_link) {
-               list_del_init(&obj->userfault_link);
-               drm_vma_node_unmap(&obj->base.vma_node,
-                                  obj->base.dev->anon_inode->i_mapping);
-       }
+                                &dev_priv->mm.userfault_list, userfault_link)
+               __i915_gem_object_release_mmap(obj);
 
        /* The fence will be lost when the device powers down. If any were
         * in use by hardware (i.e. they are pinned), we should not be powering
@@ -2059,7 +2112,7 @@ void i915_gem_runtime_suspend(struct drm_i915_private *dev_priv)
                if (!reg->vma)
                        continue;
 
-               GEM_BUG_ON(!list_empty(&reg->vma->obj->userfault_link));
+               GEM_BUG_ON(i915_vma_has_userfault(reg->vma));
                reg->dirty = true;
        }
 }
@@ -2164,7 +2217,7 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
        struct address_space *mapping;
 
        lockdep_assert_held(&obj->mm.lock);
-       GEM_BUG_ON(obj->mm.pages);
+       GEM_BUG_ON(i915_gem_object_has_pages(obj));
 
        switch (obj->mm.madv) {
        case I915_MADV_DONTNEED:
@@ -2223,13 +2276,14 @@ static void __i915_gem_object_reset_page_iter(struct drm_i915_gem_object *obj)
 void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
                                 enum i915_mm_subclass subclass)
 {
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
        struct sg_table *pages;
 
        if (i915_gem_object_has_pinned_pages(obj))
                return;
 
        GEM_BUG_ON(obj->bind_count);
-       if (!READ_ONCE(obj->mm.pages))
+       if (!i915_gem_object_has_pages(obj))
                return;
 
        /* May be called by shrinker from within get_pages() (on another bo) */
@@ -2243,6 +2297,10 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
        pages = fetch_and_zero(&obj->mm.pages);
        GEM_BUG_ON(!pages);
 
+       spin_lock(&i915->mm.obj_lock);
+       list_del(&obj->mm.link);
+       spin_unlock(&i915->mm.obj_lock);
+
        if (obj->mm.mapping) {
                void *ptr;
 
@@ -2260,6 +2318,8 @@ void __i915_gem_object_put_pages(struct drm_i915_gem_object *obj,
        if (!IS_ERR(pages))
                obj->ops->put_pages(obj, pages);
 
+       obj->mm.page_sizes.phys = obj->mm.page_sizes.sg = 0;
+
 unlock:
        mutex_unlock(&obj->mm.lock);
 }
@@ -2290,8 +2350,7 @@ static bool i915_sg_trim(struct sg_table *orig_st)
        return true;
 }
 
-static struct sg_table *
-i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
+static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
 {
        struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
        const unsigned long page_count = obj->base.size / PAGE_SIZE;
@@ -2302,7 +2361,8 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        struct sgt_iter sgt_iter;
        struct page *page;
        unsigned long last_pfn = 0;     /* suppress gcc warning */
-       unsigned int max_segment;
+       unsigned int max_segment = i915_sg_segment_size();
+       unsigned int sg_page_sizes;
        gfp_t noreclaim;
        int ret;
 
@@ -2313,18 +2373,14 @@ i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        GEM_BUG_ON(obj->base.read_domains & I915_GEM_GPU_DOMAINS);
        GEM_BUG_ON(obj->base.write_domain & I915_GEM_GPU_DOMAINS);
 
-       max_segment = swiotlb_max_segment();
-       if (!max_segment)
-               max_segment = rounddown(UINT_MAX, PAGE_SIZE);
-
        st = kmalloc(sizeof(*st), GFP_KERNEL);
        if (st == NULL)
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
 
 rebuild_st:
        if (sg_alloc_table(st, page_count, GFP_KERNEL)) {
                kfree(st);
-               return ERR_PTR(-ENOMEM);
+               return -ENOMEM;
        }
 
        /* Get the list of pages out of our struct file.  They'll be pinned
@@ -2338,6 +2394,7 @@ rebuild_st:
 
        sg = st->sgl;
        st->nents = 0;
+       sg_page_sizes = 0;
        for (i = 0; i < page_count; i++) {
                const unsigned int shrink[] = {
                        I915_SHRINK_BOUND | I915_SHRINK_UNBOUND | I915_SHRINK_PURGEABLE,
@@ -2390,8 +2447,10 @@ rebuild_st:
                if (!i ||
                    sg->length >= max_segment ||
                    page_to_pfn(page) != last_pfn + 1) {
-                       if (i)
+                       if (i) {
+                               sg_page_sizes |= sg->length;
                                sg = sg_next(sg);
+                       }
                        st->nents++;
                        sg_set_page(sg, page, PAGE_SIZE, 0);
                } else {
@@ -2402,8 +2461,10 @@ rebuild_st:
                /* Check that the i965g/gm workaround works. */
                WARN_ON((gfp & __GFP_DMA32) && (last_pfn >= 0x00100000UL));
        }
-       if (sg) /* loop terminated early; short sg table */
+       if (sg) { /* loop terminated early; short sg table */
+               sg_page_sizes |= sg->length;
                sg_mark_end(sg);
+       }
 
        /* Trim unused sg entries to avoid wasting memory. */
        i915_sg_trim(st);
@@ -2432,7 +2493,9 @@ rebuild_st:
        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_do_bit_17_swizzle(obj, st);
 
-       return st;
+       __i915_gem_object_set_pages(obj, st, sg_page_sizes);
+
+       return 0;
 
 err_sg:
        sg_mark_end(sg);
@@ -2453,12 +2516,17 @@ err_pages:
        if (ret == -ENOSPC)
                ret = -ENOMEM;
 
-       return ERR_PTR(ret);
+       return ret;
 }
 
 void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
-                                struct sg_table *pages)
+                                struct sg_table *pages,
+                                unsigned int sg_page_sizes)
 {
+       struct drm_i915_private *i915 = to_i915(obj->base.dev);
+       unsigned long supported = INTEL_INFO(i915)->page_sizes;
+       int i;
+
        lockdep_assert_held(&obj->mm.lock);
 
        obj->mm.get_page.sg_pos = pages->sgl;
@@ -2467,30 +2535,48 @@ void __i915_gem_object_set_pages(struct drm_i915_gem_object *obj,
        obj->mm.pages = pages;
 
        if (i915_gem_object_is_tiled(obj) &&
-           to_i915(obj->base.dev)->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
+           i915->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
                GEM_BUG_ON(obj->mm.quirked);
                __i915_gem_object_pin_pages(obj);
                obj->mm.quirked = true;
        }
+
+       GEM_BUG_ON(!sg_page_sizes);
+       obj->mm.page_sizes.phys = sg_page_sizes;
+
+       /*
+        * Calculate the supported page-sizes which fit into the given
+        * sg_page_sizes. This will give us the page-sizes which we may be able
+        * to use opportunistically when later inserting into the GTT. For
+        * example if phys=2G, then in theory we should be able to use 1G, 2M,
+        * 64K or 4K pages, although in practice this will depend on a number of
+        * other factors.
+        */
+       obj->mm.page_sizes.sg = 0;
+       for_each_set_bit(i, &supported, ilog2(I915_GTT_MAX_PAGE_SIZE) + 1) {
+               if (obj->mm.page_sizes.phys & ~0u << i)
+                       obj->mm.page_sizes.sg |= BIT(i);
+       }
+       GEM_BUG_ON(!HAS_PAGE_SIZES(i915, obj->mm.page_sizes.sg));
+
+       spin_lock(&i915->mm.obj_lock);
+       list_add(&obj->mm.link, &i915->mm.unbound_list);
+       spin_unlock(&i915->mm.obj_lock);
 }
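
The loop above derives page_sizes.sg from the OR of segment lengths (page_sizes.phys) and the platform's supported page sizes. A standalone sketch of that bit arithmetic, with an assumed 4K/64K/2M support mask and a 2M-plus-4K object:

#include <stdio.h>

#define BIT(n)  (1UL << (n))

int main(void)
{
        /* Assumed platform support mask: 4K, 64K and 2M pages. */
        unsigned long supported = BIT(12) | BIT(16) | BIT(21);
        /* OR of all sg segment lengths, e.g. one 2M chunk plus a 4K tail. */
        unsigned long phys = BIT(21) | BIT(12);
        unsigned long sg = 0;
        int i;

        /* A page size is usable if some segment length has a bit at or
         * above that size, mirroring the ~0u << i test in the hunk above. */
        for (i = 12; i <= 21; i++) {
                if (!(supported & BIT(i)))
                        continue;
                if (phys & (~0UL << i))
                        sg |= BIT(i);
        }

        printf("page_sizes.sg = %#lx\n", sg);
        return 0;
}

For this input all three supported sizes end up set in sg, since the 2M segment satisfies every smaller page size as well.
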
 
 static int ____i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
 {
-       struct sg_table *pages;
-
-       GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
+       int err;
 
        if (unlikely(obj->mm.madv != I915_MADV_WILLNEED)) {
                DRM_DEBUG("Attempting to obtain a purgeable object\n");
                return -EFAULT;
        }
 
-       pages = obj->ops->get_pages(obj);
-       if (unlikely(IS_ERR(pages)))
-               return PTR_ERR(pages);
+       err = obj->ops->get_pages(obj);
+       GEM_BUG_ON(!err && IS_ERR_OR_NULL(obj->mm.pages));
 
-       __i915_gem_object_set_pages(obj, pages);
-       return 0;
+       return err;
 }
 
 /* Ensure that the associated pages are gathered from the backing storage
@@ -2508,7 +2594,9 @@ int __i915_gem_object_get_pages(struct drm_i915_gem_object *obj)
        if (err)
                return err;
 
-       if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
+       if (unlikely(!i915_gem_object_has_pages(obj))) {
+               GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
+
                err = ____i915_gem_object_get_pages(obj);
                if (err)
                        goto unlock;
@@ -2591,7 +2679,9 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
        type &= ~I915_MAP_OVERRIDE;
 
        if (!atomic_inc_not_zero(&obj->mm.pages_pin_count)) {
-               if (unlikely(IS_ERR_OR_NULL(obj->mm.pages))) {
+               if (unlikely(!i915_gem_object_has_pages(obj))) {
+                       GEM_BUG_ON(i915_gem_object_has_pinned_pages(obj));
+
                        ret = ____i915_gem_object_get_pages(obj);
                        if (ret)
                                goto err_unlock;
@@ -2601,7 +2691,7 @@ void *i915_gem_object_pin_map(struct drm_i915_gem_object *obj,
                atomic_inc(&obj->mm.pages_pin_count);
                pinned = false;
        }
-       GEM_BUG_ON(!obj->mm.pages);
+       GEM_BUG_ON(!i915_gem_object_has_pages(obj));
 
        ptr = page_unpack_bits(obj->mm.mapping, &has_type);
        if (ptr && has_type != type) {
@@ -2656,7 +2746,7 @@ i915_gem_object_pwrite_gtt(struct drm_i915_gem_object *obj,
         * allows it to avoid the cost of retrieving a page (either swapin
         * or clearing-before-use) before it is overwritten.
         */
-       if (READ_ONCE(obj->mm.pages))
+       if (i915_gem_object_has_pages(obj))
                return -ENODEV;
 
        if (obj->mm.madv != I915_MADV_WILLNEED)
@@ -2800,7 +2890,17 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_request *request = NULL;
 
-       /* Prevent the signaler thread from updating the request
+       /*
+        * During the reset sequence, we must prevent the engine from
+        * entering RC6. As the context state is undefined until we restart
+        * the engine, if it does enter RC6 during the reset, the state
+        * written to the powercontext is undefined and so we may lose
+        * GPU state upon resume, i.e. fail to restart after a reset.
+        */
+       intel_uncore_forcewake_get(engine->i915, FORCEWAKE_ALL);
+
+       /*
+        * Prevent the signaler thread from updating the request
         * state (by calling dma_fence_signal) as we are processing
         * the reset. The write from the GPU of the seqno is
         * asynchronous and the signaler thread may see a different
@@ -2811,7 +2911,8 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
         */
        kthread_park(engine->breadcrumbs.signaler);
 
-       /* Prevent request submission to the hardware until we have
+       /*
+        * Prevent request submission to the hardware until we have
         * completed the reset in i915_gem_reset_finish(). If a request
         * is completed by one engine, it may then queue a request
         * to a second via its engine->irq_tasklet *just* as we are
@@ -2819,8 +2920,8 @@ i915_gem_reset_prepare_engine(struct intel_engine_cs *engine)
         * Turning off the engine->irq_tasklet until the reset is over
         * prevents the race.
         */
-       tasklet_kill(&engine->irq_tasklet);
-       tasklet_disable(&engine->irq_tasklet);
+       tasklet_kill(&engine->execlists.irq_tasklet);
+       tasklet_disable(&engine->execlists.irq_tasklet);
 
        if (engine->irq_seqno_barrier)
                engine->irq_seqno_barrier(engine);
@@ -2999,8 +3100,10 @@ void i915_gem_reset(struct drm_i915_private *dev_priv)
 
 void i915_gem_reset_finish_engine(struct intel_engine_cs *engine)
 {
-       tasklet_enable(&engine->irq_tasklet);
+       tasklet_enable(&engine->execlists.irq_tasklet);
        kthread_unpark(engine->breadcrumbs.signaler);
+
+       intel_uncore_forcewake_put(engine->i915, FORCEWAKE_ALL);
 }
 
 void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
@@ -3017,10 +3120,16 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv)
 }
 
 static void nop_submit_request(struct drm_i915_gem_request *request)
+{
+       dma_fence_set_error(&request->fence, -EIO);
+
+       i915_gem_request_submit(request);
+}
+
+static void nop_complete_submit_request(struct drm_i915_gem_request *request)
 {
        unsigned long flags;
 
-       GEM_BUG_ON(!i915_terminally_wedged(&request->i915->gpu_error));
        dma_fence_set_error(&request->fence, -EIO);
 
        spin_lock_irqsave(&request->engine->timeline->lock, flags);
@@ -3029,81 +3138,59 @@ static void nop_submit_request(struct drm_i915_gem_request *request)
        spin_unlock_irqrestore(&request->engine->timeline->lock, flags);
 }
 
-static void engine_set_wedged(struct intel_engine_cs *engine)
+void i915_gem_set_wedged(struct drm_i915_private *i915)
 {
-       struct drm_i915_gem_request *request;
-       unsigned long flags;
+       struct intel_engine_cs *engine;
+       enum intel_engine_id id;
 
-       /* We need to be sure that no thread is running the old callback as
-        * we install the nop handler (otherwise we would submit a request
-        * to hardware that will never complete). In order to prevent this
-        * race, we wait until the machine is idle before making the swap
-        * (using stop_machine()).
+       /*
+        * First, stop submission to hw, but do not yet complete requests by
+        * rolling the global seqno forward (since this would complete requests
+        * for which we haven't set the fence error to EIO yet).
         */
-       engine->submit_request = nop_submit_request;
-
-       /* Mark all executing requests as skipped */
-       spin_lock_irqsave(&engine->timeline->lock, flags);
-       list_for_each_entry(request, &engine->timeline->requests, link)
-               if (!i915_gem_request_completed(request))
-                       dma_fence_set_error(&request->fence, -EIO);
-       spin_unlock_irqrestore(&engine->timeline->lock, flags);
+       for_each_engine(engine, i915, id)
+               engine->submit_request = nop_submit_request;
 
        /*
-        * Clear the execlists queue up before freeing the requests, as those
-        * are the ones that keep the context and ringbuffer backing objects
-        * pinned in place.
+        * Make sure no one is running the old callback before we proceed with
+        * cancelling requests and resetting the completion tracking. Otherwise
+        * we might submit a request to the hardware which never completes.
         */
+       synchronize_rcu();
 
-       if (i915.enable_execlists) {
-               struct execlist_port *port = engine->execlist_port;
-               unsigned long flags;
-               unsigned int n;
-
-               spin_lock_irqsave(&engine->timeline->lock, flags);
-
-               for (n = 0; n < ARRAY_SIZE(engine->execlist_port); n++)
-                       i915_gem_request_put(port_request(&port[n]));
-               memset(engine->execlist_port, 0, sizeof(engine->execlist_port));
-               engine->execlist_queue = RB_ROOT;
-               engine->execlist_first = NULL;
-
-               spin_unlock_irqrestore(&engine->timeline->lock, flags);
+       for_each_engine(engine, i915, id) {
+               /* Mark all executing requests as skipped */
+               engine->cancel_requests(engine);
 
-               /* The port is checked prior to scheduling a tasklet, but
-                * just in case we have suspended the tasklet to do the
-                * wedging make sure that when it wakes, it decides there
-                * is no work to do by clearing the irq_posted bit.
+               /*
+                * Only once we've force-cancelled all in-flight requests can we
+                * start to complete all requests.
                 */
-               clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
+               engine->submit_request = nop_complete_submit_request;
        }
 
-       /* Mark all pending requests as complete so that any concurrent
-        * (lockless) lookup doesn't try and wait upon the request as we
-        * reset it.
+       /*
+        * Make sure no request can slip through without getting completed by
+        * either this call here to intel_engine_init_global_seqno, or the one
+        * in nop_complete_submit_request.
         */
-       intel_engine_init_global_seqno(engine,
-                                      intel_engine_last_submit(engine));
-}
+       synchronize_rcu();
 
-static int __i915_gem_set_wedged_BKL(void *data)
-{
-       struct drm_i915_private *i915 = data;
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
+       for_each_engine(engine, i915, id) {
+               unsigned long flags;
 
-       for_each_engine(engine, i915, id)
-               engine_set_wedged(engine);
+               /* Mark all pending requests as complete so that any concurrent
+                * (lockless) lookup doesn't try and wait upon the request as we
+                * reset it.
+                */
+               spin_lock_irqsave(&engine->timeline->lock, flags);
+               intel_engine_init_global_seqno(engine,
+                                              intel_engine_last_submit(engine));
+               spin_unlock_irqrestore(&engine->timeline->lock, flags);
+       }
 
        set_bit(I915_WEDGED, &i915->gpu_error.flags);
        wake_up_all(&i915->gpu_error.reset_queue);
-
-       return 0;
-}
-
-void i915_gem_set_wedged(struct drm_i915_private *dev_priv)
-{
-       stop_machine(__i915_gem_set_wedged_BKL, dev_priv, NULL);
 }
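
i915_gem_set_wedged() now quiesces submission in two phases, with synchronize_rcu() between swapping in nop_submit_request and nop_complete_submit_request. A rough userspace analogue of that shape, using a rwlock in place of RCU and plain pointer stores in place of rcu_assign_pointer(), so it is only a single-threaded illustration rather than a thread-safe implementation:

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t submit_lock = PTHREAD_RWLOCK_INITIALIZER;

static void real_submit(void)
{
        puts("submit to hw");
}

static void nop_submit(void)
{
        puts("set fence error to -EIO, do not complete");
}

static void nop_complete_submit(void)
{
        puts("set fence error to -EIO and complete");
}

/* In a threaded program this pointer would need rcu_assign_pointer()/
 * atomics; a plain store is enough for this single-threaded demo. */
static void (*submit_request)(void) = real_submit;

static void submit(void)
{
        pthread_rwlock_rdlock(&submit_lock);    /* stands in for an RCU read section */
        submit_request();
        pthread_rwlock_unlock(&submit_lock);
}

static void wait_for_old_callers(void)          /* stands in for synchronize_rcu() */
{
        pthread_rwlock_wrlock(&submit_lock);
        pthread_rwlock_unlock(&submit_lock);
}

static void set_wedged(void)
{
        submit_request = nop_submit;            /* phase 1: stop submission */
        wait_for_old_callers();
        /* ... cancel requests already in flight here ... */
        submit_request = nop_complete_submit;   /* phase 2: complete with -EIO */
        wait_for_old_callers();
        /* ... only now roll the global seqno forward ... */
}

int main(void)
{
        submit();
        set_wedged();
        submit();
        return 0;
}
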
 
 bool i915_gem_unset_wedged(struct drm_i915_private *i915)
@@ -3267,11 +3354,11 @@ void i915_gem_close_object(struct drm_gem_object *gem, struct drm_file *file)
                struct i915_gem_context *ctx = lut->ctx;
                struct i915_vma *vma;
 
+               GEM_BUG_ON(ctx->file_priv == ERR_PTR(-EBADF));
                if (ctx->file_priv != fpriv)
                        continue;
 
                vma = radix_tree_delete(&ctx->handles_vma, lut->handle);
-
                GEM_BUG_ON(vma->obj != obj);
 
                /* We allow the process to have multiple handles to the same
@@ -3385,24 +3472,12 @@ static int wait_for_timeline(struct i915_gem_timeline *tl, unsigned int flags)
        return 0;
 }
 
-static int wait_for_engine(struct intel_engine_cs *engine, int timeout_ms)
-{
-       return wait_for(intel_engine_is_idle(engine), timeout_ms);
-}
-
 static int wait_for_engines(struct drm_i915_private *i915)
 {
-       struct intel_engine_cs *engine;
-       enum intel_engine_id id;
-
-       for_each_engine(engine, i915, id) {
-               if (GEM_WARN_ON(wait_for_engine(engine, 50))) {
-                       i915_gem_set_wedged(i915);
-                       return -EIO;
-               }
-
-               GEM_BUG_ON(intel_engine_get_seqno(engine) !=
-                          intel_engine_last_submit(engine));
+       if (wait_for(intel_engines_are_idle(i915), 50)) {
+               DRM_ERROR("Failed to idle engines, declaring wedged!\n");
+               i915_gem_set_wedged(i915);
+               return -EIO;
        }
 
        return 0;
@@ -3452,7 +3527,7 @@ static void __i915_gem_object_flush_for_display(struct drm_i915_gem_object *obj)
 
 void i915_gem_object_flush_if_display(struct drm_i915_gem_object *obj)
 {
-       if (!READ_ONCE(obj->pin_display))
+       if (!READ_ONCE(obj->pin_global))
                return;
 
        mutex_lock(&obj->base.dev->struct_mutex);
@@ -3819,10 +3894,10 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 
        lockdep_assert_held(&obj->base.dev->struct_mutex);
 
-       /* Mark the pin_display early so that we account for the
+       /* Mark the global pin early so that we account for the
         * display coherency whilst setting up the cache domains.
         */
-       obj->pin_display++;
+       obj->pin_global++;
 
        /* The display engine is not coherent with the LLC cache on gen6.  As
         * a result, we make sure that the pinning that is about to occur is
@@ -3838,7 +3913,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                                              I915_CACHE_WT : I915_CACHE_NONE);
        if (ret) {
                vma = ERR_PTR(ret);
-               goto err_unpin_display;
+               goto err_unpin_global;
        }
 
        /* As the user may map the buffer once pinned in the display plane
@@ -3869,7 +3944,7 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
                vma = i915_gem_object_ggtt_pin(obj, view, 0, alignment, flags);
        }
        if (IS_ERR(vma))
-               goto err_unpin_display;
+               goto err_unpin_global;
 
        vma->display_alignment = max_t(u64, vma->display_alignment, alignment);
 
@@ -3884,8 +3959,8 @@ i915_gem_object_pin_to_display_plane(struct drm_i915_gem_object *obj,
 
        return vma;
 
-err_unpin_display:
-       obj->pin_display--;
+err_unpin_global:
+       obj->pin_global--;
        return vma;
 }
 
@@ -3894,10 +3969,10 @@ i915_gem_object_unpin_from_display_plane(struct i915_vma *vma)
 {
        lockdep_assert_held(&vma->vm->i915->drm.struct_mutex);
 
-       if (WARN_ON(vma->obj->pin_display == 0))
+       if (WARN_ON(vma->obj->pin_global == 0))
                return;
 
-       if (--vma->obj->pin_display == 0)
+       if (--vma->obj->pin_global == 0)
                vma->display_alignment = I915_GTT_MIN_ALIGNMENT;
 
        /* Bump the LRU to try and avoid premature eviction whilst flipping  */
@@ -4016,42 +4091,47 @@ i915_gem_object_ggtt_pin(struct drm_i915_gem_object *obj,
 
        lockdep_assert_held(&obj->base.dev->struct_mutex);
 
+       if (!view && flags & PIN_MAPPABLE) {
+               /* If the required space is larger than the available
+                * aperture, we will not be able to find a slot for the
+                * object and unbinding the object now will be in
+                * vain. Worse, doing so may cause us to ping-pong
+                * the object in and out of the Global GTT and
+                * waste a lot of cycles under the mutex.
+                */
+               if (obj->base.size > dev_priv->ggtt.mappable_end)
+                       return ERR_PTR(-E2BIG);
+
+               /* If NONBLOCK is set the caller is optimistically
+                * trying to cache the full object within the mappable
+                * aperture, and *must* have a fallback in place for
+                * situations where we cannot bind the object. We
+                * can be a little more lax here and use the fallback
+                * more often to avoid costly migrations of ourselves
+                * and other objects within the aperture.
+                *
+                * Half-the-aperture is used as a simple heuristic.
+                * More interesting would be to search for a free
+                * block prior to making the commitment to unbind.
+                * That caters for the self-harm case, and with a
+                * little more heuristics (e.g. NOFAULT, NOEVICT)
+                * we could try to minimise harm to others.
+                */
+               if (flags & PIN_NONBLOCK &&
+                   obj->base.size > dev_priv->ggtt.mappable_end / 2)
+                       return ERR_PTR(-ENOSPC);
+       }
+
        vma = i915_vma_instance(obj, vm, view);
        if (unlikely(IS_ERR(vma)))
                return vma;
 
        if (i915_vma_misplaced(vma, size, alignment, flags)) {
-               if (flags & PIN_NONBLOCK &&
-                   (i915_vma_is_pinned(vma) || i915_vma_is_active(vma)))
-                       return ERR_PTR(-ENOSPC);
+               if (flags & PIN_NONBLOCK) {
+                       if (i915_vma_is_pinned(vma) || i915_vma_is_active(vma))
+                               return ERR_PTR(-ENOSPC);
 
-               if (flags & PIN_MAPPABLE) {
-                       /* If the required space is larger than the available
-                        * aperture, we will not able to find a slot for the
-                        * object and unbinding the object now will be in
-                        * vain. Worse, doing so may cause us to ping-pong
-                        * the object in and out of the Global GTT and
-                        * waste a lot of cycles under the mutex.
-                        */
-                       if (vma->fence_size > dev_priv->ggtt.mappable_end)
-                               return ERR_PTR(-E2BIG);
-
-                       /* If NONBLOCK is set the caller is optimistically
-                        * trying to cache the full object within the mappable
-                        * aperture, and *must* have a fallback in place for
-                        * situations where we cannot bind the object. We
-                        * can be a little more lax here and use the fallback
-                        * more often to avoid costly migrations of ourselves
-                        * and other objects within the aperture.
-                        *
-                        * Half-the-aperture is used as a simple heuristic.
-                        * More interesting would to do search for a free
-                        * block prior to making the commitment to unbind.
-                        * That caters for the self-harm case, and with a
-                        * little more heuristics (e.g. NOFAULT, NOEVICT)
-                        * we could try to minimise harm to others.
-                        */
-                       if (flags & PIN_NONBLOCK &&
+                       if (flags & PIN_MAPPABLE &&
                            vma->fence_size > dev_priv->ggtt.mappable_end / 2)
                                return ERR_PTR(-ENOSPC);
                }
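
The PIN_MAPPABLE checks hoisted above reject objects that can never fit the mappable aperture (-E2BIG) and, under PIN_NONBLOCK, objects larger than half of it (-ENOSPC), leaving eviction to callers without a fallback. A small sketch of that decision, with an assumed 256 MiB aperture and illustrative object sizes:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int check_mappable_pin(unsigned long long obj_size,
                              unsigned long long mappable_end,
                              bool nonblock)
{
        if (obj_size > mappable_end)
                return -E2BIG;          /* can never fit, do not even try */

        /* Half-the-aperture heuristic: with NONBLOCK the caller has a
         * fallback, so refuse huge objects rather than thrash the GGTT. */
        if (nonblock && obj_size > mappable_end / 2)
                return -ENOSPC;         /* use the fallback path */

        return 0;                       /* worth attempting the pin */
}

int main(void)
{
        const unsigned long long aperture = 256ULL << 20;       /* 256 MiB */

        printf("%d %d %d\n",
               check_mappable_pin(16ULL << 20, aperture, true),
               check_mappable_pin(200ULL << 20, aperture, true),
               check_mappable_pin(512ULL << 20, aperture, true));
        return 0;
}
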
@@ -4232,7 +4312,7 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
        if (err)
                goto out;
 
-       if (obj->mm.pages &&
+       if (i915_gem_object_has_pages(obj) &&
            i915_gem_object_is_tiled(obj) &&
            dev_priv->quirks & QUIRK_PIN_SWIZZLED_PAGES) {
                if (obj->mm.madv == I915_MADV_WILLNEED) {
@@ -4251,7 +4331,8 @@ i915_gem_madvise_ioctl(struct drm_device *dev, void *data,
                obj->mm.madv = args->madv;
 
        /* if the object is no longer attached, discard its backing storage */
-       if (obj->mm.madv == I915_MADV_DONTNEED && !obj->mm.pages)
+       if (obj->mm.madv == I915_MADV_DONTNEED &&
+           !i915_gem_object_has_pages(obj))
                i915_gem_object_truncate(obj);
 
        args->retained = obj->mm.madv != __I915_MADV_PURGED;
@@ -4277,8 +4358,6 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
 {
        mutex_init(&obj->mm.lock);
 
-       INIT_LIST_HEAD(&obj->global_link);
-       INIT_LIST_HEAD(&obj->userfault_link);
        INIT_LIST_HEAD(&obj->vma_list);
        INIT_LIST_HEAD(&obj->lut_list);
        INIT_LIST_HEAD(&obj->batch_pool_link);
@@ -4308,6 +4387,30 @@ static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
        .pwrite = i915_gem_object_pwrite_gtt,
 };
 
+static int i915_gem_object_create_shmem(struct drm_device *dev,
+                                       struct drm_gem_object *obj,
+                                       size_t size)
+{
+       struct drm_i915_private *i915 = to_i915(dev);
+       unsigned long flags = VM_NORESERVE;
+       struct file *filp;
+
+       drm_gem_private_object_init(dev, obj, size);
+
+       if (i915->mm.gemfs)
+               filp = shmem_file_setup_with_mnt(i915->mm.gemfs, "i915", size,
+                                                flags);
+       else
+               filp = shmem_file_setup("i915", size, flags);
+
+       if (IS_ERR(filp))
+               return PTR_ERR(filp);
+
+       obj->filp = filp;
+
+       return 0;
+}
+
 struct drm_i915_gem_object *
 i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
 {
@@ -4332,7 +4435,7 @@ i915_gem_object_create(struct drm_i915_private *dev_priv, u64 size)
        if (obj == NULL)
                return ERR_PTR(-ENOMEM);
 
-       ret = drm_gem_object_init(&dev_priv->drm, &obj->base, size);
+       ret = i915_gem_object_create_shmem(&dev_priv->drm, &obj->base, size);
        if (ret)
                goto fail;
 
@@ -4409,13 +4512,14 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 {
        struct drm_i915_gem_object *obj, *on;
 
-       mutex_lock(&i915->drm.struct_mutex);
        intel_runtime_pm_get(i915);
-       llist_for_each_entry(obj, freed, freed) {
+       llist_for_each_entry_safe(obj, on, freed, freed) {
                struct i915_vma *vma, *vn;
 
                trace_i915_gem_object_destroy(obj);
 
+               mutex_lock(&i915->drm.struct_mutex);
+
                GEM_BUG_ON(i915_gem_object_is_active(obj));
                list_for_each_entry_safe(vma, vn,
                                         &obj->vma_list, obj_link) {
@@ -4426,16 +4530,24 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
                GEM_BUG_ON(!list_empty(&obj->vma_list));
                GEM_BUG_ON(!RB_EMPTY_ROOT(&obj->vma_tree));
 
-               list_del(&obj->global_link);
-       }
-       intel_runtime_pm_put(i915);
-       mutex_unlock(&i915->drm.struct_mutex);
+               /* This serializes freeing with the shrinker. Since the free
+                * is delayed, first by RCU then by the workqueue, we want the
+                * shrinker to be able to free pages of unreferenced objects,
+                * or else we may oom whilst there are plenty of deferred
+                * freed objects.
+                */
+               if (i915_gem_object_has_pages(obj)) {
+                       spin_lock(&i915->mm.obj_lock);
+                       list_del_init(&obj->mm.link);
+                       spin_unlock(&i915->mm.obj_lock);
+               }
 
-       cond_resched();
+               mutex_unlock(&i915->drm.struct_mutex);
 
-       llist_for_each_entry_safe(obj, on, freed, freed) {
                GEM_BUG_ON(obj->bind_count);
+               GEM_BUG_ON(obj->userfault_count);
                GEM_BUG_ON(atomic_read(&obj->frontbuffer_bits));
+               GEM_BUG_ON(!list_empty(&obj->lut_list));
 
                if (obj->ops->release)
                        obj->ops->release(obj);
@@ -4443,7 +4555,7 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
                if (WARN_ON(i915_gem_object_has_pinned_pages(obj)))
                        atomic_set(&obj->mm.pages_pin_count, 0);
                __i915_gem_object_put_pages(obj, I915_MM_NORMAL);
-               GEM_BUG_ON(obj->mm.pages);
+               GEM_BUG_ON(i915_gem_object_has_pages(obj));
 
                if (obj->base.import_attach)
                        drm_prime_gem_destroy(&obj->base, NULL);
@@ -4454,16 +4566,29 @@ static void __i915_gem_free_objects(struct drm_i915_private *i915,
 
                kfree(obj->bit_17);
                i915_gem_object_free(obj);
+
+               if (on)
+                       cond_resched();
        }
+       intel_runtime_pm_put(i915);
 }
 
 static void i915_gem_flush_free_objects(struct drm_i915_private *i915)
 {
        struct llist_node *freed;
 
-       freed = llist_del_all(&i915->mm.free_list);
-       if (unlikely(freed))
+       /* Free the oldest, most stale object to keep the free_list short */
+       freed = NULL;
+       if (!llist_empty(&i915->mm.free_list)) { /* quick test for hotpath */
+               /* Only one consumer of llist_del_first() allowed */
+               spin_lock(&i915->mm.free_lock);
+               freed = llist_del_first(&i915->mm.free_list);
+               spin_unlock(&i915->mm.free_lock);
+       }
+       if (unlikely(freed)) {
+               freed->next = NULL;
                __i915_gem_free_objects(i915, freed);
+       }
 }
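
mm.free_list is a lock-free llist: producers may push freed objects concurrently and llist_del_all() can drain it without a lock, but llist_del_first() tolerates only a single consumer, hence the new mm.free_lock around it. A userspace sketch of the push/drain half of that pattern using C11 atomics (not the kernel's llist implementation):

#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct node {
        struct node *next;
        int id;
};

static _Atomic(struct node *) free_list = NULL;

/* Lock-free push, as i915 does when deferring an object free. */
static void push(struct node *n)
{
        struct node *head = atomic_load(&free_list);

        do {
                n->next = head;
        } while (!atomic_compare_exchange_weak(&free_list, &head, n));
}

/* Grab the whole list in one shot (the llist_del_all() analogue);
 * popping just the first node instead must be serialised, since only
 * a single consumer of llist_del_first() is allowed. */
static struct node *pop_all(void)
{
        return atomic_exchange(&free_list, NULL);
}

int main(void)
{
        struct node a = { .id = 1 }, b = { .id = 2 };

        push(&a);
        push(&b);

        for (struct node *n = pop_all(); n; n = n->next)
                printf("freeing object %d\n", n->id);
        return 0;
}
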
 
 static void __i915_gem_free_work(struct work_struct *work)
@@ -4480,11 +4605,17 @@ static void __i915_gem_free_work(struct work_struct *work)
         * unbound now.
         */
 
+       spin_lock(&i915->mm.free_lock);
        while ((freed = llist_del_all(&i915->mm.free_list))) {
+               spin_unlock(&i915->mm.free_lock);
+
                __i915_gem_free_objects(i915, freed);
                if (need_resched())
-                       break;
+                       return;
+
+               spin_lock(&i915->mm.free_lock);
        }
+       spin_unlock(&i915->mm.free_lock);
 }
 
 static void __i915_gem_free_object_rcu(struct rcu_head *head)
@@ -4543,6 +4674,12 @@ static void assert_kernel_context_is_current(struct drm_i915_private *dev_priv)
 
 void i915_gem_sanitize(struct drm_i915_private *i915)
 {
+       if (i915_terminally_wedged(&i915->gpu_error)) {
+               mutex_lock(&i915->drm.struct_mutex);
+               i915_gem_unset_wedged(i915);
+               mutex_unlock(&i915->drm.struct_mutex);
+       }
+
        /*
         * If we inherit context state from the BIOS or earlier occupants
         * of the GPU, the GPU may be in an inconsistent state when we
@@ -4582,7 +4719,7 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
        ret = i915_gem_wait_for_idle(dev_priv,
                                     I915_WAIT_INTERRUPTIBLE |
                                     I915_WAIT_LOCKED);
-       if (ret)
+       if (ret && ret != -EIO)
                goto err_unlock;
 
        assert_kernel_context_is_current(dev_priv);
@@ -4597,14 +4734,14 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
        /* As the idle_work is rearming if it detects a race, play safe and
         * repeat the flush until it is definitely idle.
         */
-       while (flush_delayed_work(&dev_priv->gt.idle_work))
-               ;
+       drain_delayed_work(&dev_priv->gt.idle_work);
 
        /* Assert that we successfully flushed all the work and
         * reset the GPU back to its idle, low power state.
         */
        WARN_ON(dev_priv->gt.awake);
-       WARN_ON(!intel_engines_are_idle(dev_priv));
+       if (WARN_ON(!intel_engines_are_idle(dev_priv)))
+               i915_gem_set_wedged(dev_priv); /* no hope, discard everything */
 
        /*
         * Neither the BIOS, ourselves or any other kernel
@@ -4626,11 +4763,12 @@ int i915_gem_suspend(struct drm_i915_private *dev_priv)
         * machine in an unusable condition.
         */
        i915_gem_sanitize(dev_priv);
-       goto out_rpm_put;
+
+       intel_runtime_pm_put(dev_priv);
+       return 0;
 
 err_unlock:
        mutex_unlock(&dev->struct_mutex);
-out_rpm_put:
        intel_runtime_pm_put(dev_priv);
        return ret;
 }
@@ -4643,6 +4781,7 @@ void i915_gem_resume(struct drm_i915_private *dev_priv)
 
        mutex_lock(&dev->struct_mutex);
        i915_gem_restore_gtt_mappings(dev_priv);
+       i915_gem_restore_fences(dev_priv);
 
        /* As we didn't flush the kernel context before suspend, we cannot
         * guarantee that the context image is complete. So let's just reset
@@ -4756,6 +4895,10 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
        init_unused_rings(dev_priv);
 
        BUG_ON(!dev_priv->kernel_context);
+       if (i915_terminally_wedged(&dev_priv->gpu_error)) {
+               ret = -EIO;
+               goto out;
+       }
 
        ret = i915_ppgtt_init_hw(dev_priv);
        if (ret) {
@@ -4786,7 +4929,7 @@ bool intel_sanitize_semaphores(struct drm_i915_private *dev_priv, int value)
                return false;
 
        /* TODO: make semaphores and Execlists play nicely together */
-       if (i915.enable_execlists)
+       if (i915_modparams.enable_execlists)
                return false;
 
        if (value >= 0)
@@ -4805,9 +4948,18 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
 
        mutex_lock(&dev_priv->drm.struct_mutex);
 
+       /*
+        * We need to fall back to 4K pages since gvt gtt handling doesn't
+        * support huge page entries - we will need to check whether the
+        * hypervisor mm can support huge guest pages, or just emulate in gvt.
+        */
+       if (intel_vgpu_active(dev_priv))
+               mkwrite_device_info(dev_priv)->page_sizes =
+                       I915_GTT_PAGE_SIZE_4K;
+
        dev_priv->mm.unordered_timeline = dma_fence_context_alloc(1);
 
-       if (!i915.enable_execlists) {
+       if (!i915_modparams.enable_execlists) {
                dev_priv->gt.resume = intel_legacy_submission_resume;
                dev_priv->gt.cleanup_engine = intel_engine_cleanup;
        } else {
@@ -4845,8 +4997,10 @@ int i915_gem_init(struct drm_i915_private *dev_priv)
                 * wedged. But we only want to do this where the GPU is angry,
                 * for all other failure, such as an allocation failure, bail.
                 */
-               DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
-               i915_gem_set_wedged(dev_priv);
+               if (!i915_terminally_wedged(&dev_priv->gpu_error)) {
+                       DRM_ERROR("Failed to initialize GPU, declaring it wedged\n");
+                       i915_gem_set_wedged(dev_priv);
+               }
                ret = 0;
        }
 
@@ -4946,11 +5100,15 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
                goto err_priorities;
 
        INIT_WORK(&dev_priv->mm.free_work, __i915_gem_free_work);
+
+       spin_lock_init(&dev_priv->mm.obj_lock);
+       spin_lock_init(&dev_priv->mm.free_lock);
        init_llist_head(&dev_priv->mm.free_list);
        INIT_LIST_HEAD(&dev_priv->mm.unbound_list);
        INIT_LIST_HEAD(&dev_priv->mm.bound_list);
        INIT_LIST_HEAD(&dev_priv->mm.fence_list);
        INIT_LIST_HEAD(&dev_priv->mm.userfault_list);
+
        INIT_DELAYED_WORK(&dev_priv->gt.retire_work,
                          i915_gem_retire_work_handler);
        INIT_DELAYED_WORK(&dev_priv->gt.idle_work,
@@ -4962,6 +5120,10 @@ i915_gem_load_init(struct drm_i915_private *dev_priv)
 
        spin_lock_init(&dev_priv->fb_tracking.lock);
 
+       err = i915_gemfs_init(dev_priv);
+       if (err)
+               DRM_NOTE("Unable to create a private tmpfs mount, hugepage support will be disabled (%d).\n", err);
+
        return 0;
 
 err_priorities:
@@ -5000,6 +5162,8 @@ void i915_gem_load_cleanup(struct drm_i915_private *dev_priv)
 
        /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
        rcu_barrier();
+
+       i915_gemfs_fini(dev_priv);
 }
 
 int i915_gem_freeze(struct drm_i915_private *dev_priv)
@@ -5038,12 +5202,12 @@ int i915_gem_freeze_late(struct drm_i915_private *dev_priv)
        i915_gem_shrink(dev_priv, -1UL, NULL, I915_SHRINK_UNBOUND);
        i915_gem_drain_freed_objects(dev_priv);
 
-       mutex_lock(&dev_priv->drm.struct_mutex);
+       spin_lock(&dev_priv->mm.obj_lock);
        for (p = phases; *p; p++) {
-               list_for_each_entry(obj, *p, global_link)
+               list_for_each_entry(obj, *p, mm.link)
                        __start_cpu_write(obj);
        }
-       mutex_unlock(&dev_priv->drm.struct_mutex);
+       spin_unlock(&dev_priv->mm.obj_lock);
 
        return 0;
 }
@@ -5362,7 +5526,17 @@ int i915_gem_object_attach_phys(struct drm_i915_gem_object *obj, int align)
                goto err_unlock;
        }
 
-       pages = obj->mm.pages;
+       pages = fetch_and_zero(&obj->mm.pages);
+       if (pages) {
+               struct drm_i915_private *i915 = to_i915(obj->base.dev);
+
+               __i915_gem_object_reset_page_iter(obj);
+
+               spin_lock(&i915->mm.obj_lock);
+               list_del(&obj->mm.link);
+               spin_unlock(&i915->mm.obj_lock);
+       }
+
        obj->ops = &i915_gem_phys_ops;
 
        err = ____i915_gem_object_get_pages(obj);
@@ -5389,6 +5563,7 @@ err_unlock:
 #include "selftests/scatterlist.c"
 #include "selftests/mock_gem_device.c"
 #include "selftests/huge_gem_object.c"
+#include "selftests/huge_pages.c"
 #include "selftests/i915_gem_object.c"
 #include "selftests/i915_gem_coherency.c"
 #endif