Merge drm/drm-next into drm-intel-next-queued
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 0c8aa57ce83b4033723ded1540986b628d92d772..c55b1f75c9803e72fe339020bec14d1718385ab2 100644
@@ -1740,6 +1740,7 @@ i915_gem_set_domain_ioctl(struct drm_device *dev, void *data,
         */
        err = i915_gem_object_wait(obj,
                                   I915_WAIT_INTERRUPTIBLE |
+                                  I915_WAIT_PRIORITY |
                                   (write_domain ? I915_WAIT_ALL : 0),
                                   MAX_SCHEDULE_TIMEOUT,
                                   to_rps_client(file));
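
Note: the new I915_WAIT_PRIORITY bit asks the scheduler to boost the priority of
the requests being waited upon, so a client blocked in set-domain is serviced
sooner. The wait flags are independent bits combined with bitwise OR; a minimal
sketch of the composition (the flag values below are illustrative, not the real
i915 definitions):

    /* Sketch only: values are illustrative, not the kernel's. */
    #define I915_WAIT_INTERRUPTIBLE (1U << 0) /* signals may abort the wait */
    #define I915_WAIT_PRIORITY      (1U << 1) /* boost the waited-on requests */
    #define I915_WAIT_ALL           (1U << 2) /* wait on all fences, not just the last write */

    static unsigned int set_domain_wait_flags(unsigned int write_domain)
    {
            /* A writer must drain every outstanding fence; a reader only
             * needs the most recent write to complete. */
            return I915_WAIT_INTERRUPTIBLE |
                   I915_WAIT_PRIORITY |
                   (write_domain ? I915_WAIT_ALL : 0);
    }
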
@@ -2381,11 +2382,23 @@ void __i915_gem_object_invalidate(struct drm_i915_gem_object *obj)
        invalidate_mapping_pages(mapping, 0, (loff_t)-1);
 }
 
+/*
+ * Move pages to the appropriate LRU list and release the pagevec,
+ * decrementing the reference count of those pages.
+ */
+static void check_release_pagevec(struct pagevec *pvec)
+{
+       check_move_unevictable_pages(pvec);
+       __pagevec_release(pvec);
+       cond_resched();
+}
+
 static void
 i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
                              struct sg_table *pages)
 {
        struct sgt_iter sgt_iter;
+       struct pagevec pvec;
        struct page *page;
 
        __i915_gem_object_release_shmem(obj, pages, true);
@@ -2395,6 +2408,9 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
        if (i915_gem_object_needs_bit17_swizzle(obj))
                i915_gem_object_save_bit_17_swizzle(obj, pages);
 
+       mapping_clear_unevictable(file_inode(obj->base.filp)->i_mapping);
+
+       pagevec_init(&pvec);
        for_each_sgt_page(page, sgt_iter, pages) {
                if (obj->mm.dirty)
                        set_page_dirty(page);
@@ -2402,8 +2418,11 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
                if (obj->mm.madv == I915_MADV_WILLNEED)
                        mark_page_accessed(page);
 
-               put_page(page);
+               if (!pagevec_add(&pvec, page))
+                       check_release_pagevec(&pvec);
        }
+       if (pagevec_count(&pvec))
+               check_release_pagevec(&pvec);
        obj->mm.dirty = false;
 
        sg_free_table(pages);
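
Note: together with the helper added above, the release path now batches page
releases through a pagevec instead of calling put_page() once per page, and
check_move_unevictable_pages() first shifts the pages back onto the regular LRU
lists. A minimal, self-contained sketch of the same batching pattern, with
release_page_array() as a hypothetical caller:

    #include <linux/pagevec.h>

    /* Hypothetical caller demonstrating the batching pattern above. */
    static void release_page_array(struct page **pages, unsigned long count)
    {
            struct pagevec pvec;
            unsigned long i;

            pagevec_init(&pvec);
            for (i = 0; i < count; i++) {
                    /* pagevec_add() returns the slots remaining; 0 means full */
                    if (!pagevec_add(&pvec, pages[i]))
                            check_release_pagevec(&pvec);
            }
            /* flush the final, partially filled batch */
            if (pagevec_count(&pvec))
                    check_release_pagevec(&pvec);
    }
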
@@ -2483,7 +2502,7 @@ unlock:
        mutex_unlock(&obj->mm.lock);
 }
 
-static bool i915_sg_trim(struct sg_table *orig_st)
+bool i915_sg_trim(struct sg_table *orig_st)
 {
        struct sg_table new_st;
        struct scatterlist *sg, *new_sg;
@@ -2524,6 +2543,7 @@ static int i915_gem_object_get_pages_gtt(struct drm_i915_gem_object *obj)
        unsigned long last_pfn = 0;     /* suppress gcc warning */
        unsigned int max_segment = i915_sg_segment_size();
        unsigned int sg_page_sizes;
+       struct pagevec pvec;
        gfp_t noreclaim;
        int ret;
 
@@ -2559,6 +2579,7 @@ rebuild_st:
         * Fail silently without starting the shrinker
         */
        mapping = obj->base.filp->f_mapping;
+       mapping_set_unevictable(mapping);
        noreclaim = mapping_gfp_constraint(mapping, ~__GFP_RECLAIM);
        noreclaim |= __GFP_NORETRY | __GFP_NOWARN;
 
@@ -2573,6 +2594,7 @@ rebuild_st:
                gfp_t gfp = noreclaim;
 
                do {
+                       cond_resched();
                        page = shmem_read_mapping_page_gfp(mapping, i, gfp);
                        if (likely(!IS_ERR(page)))
                                break;
@@ -2583,7 +2605,6 @@ rebuild_st:
                        }
 
                        i915_gem_shrink(dev_priv, 2 * page_count, NULL, *s++);
-                       cond_resched();
 
                        /*
                         * We've tried hard to allocate the memory by reaping
@@ -2673,8 +2694,14 @@ rebuild_st:
 err_sg:
        sg_mark_end(sg);
 err_pages:
-       for_each_sgt_page(page, sgt_iter, st)
-               put_page(page);
+       mapping_clear_unevictable(mapping);
+       pagevec_init(&pvec);
+       for_each_sgt_page(page, sgt_iter, st) {
+               if (!pagevec_add(&pvec, page))
+                       check_release_pagevec(&pvec);
+       }
+       if (pagevec_count(&pvec))
+               check_release_pagevec(&pvec);
        sg_free_table(st);
        kfree(st);
 
@@ -3530,6 +3557,8 @@ static void __sleep_rcu(struct rcu_head *rcu)
        struct sleep_rcu_work *s = container_of(rcu, typeof(*s), rcu);
        struct drm_i915_private *i915 = s->i915;
 
+       destroy_rcu_head(&s->rcu);
+
        if (same_epoch(i915, s->epoch)) {
                INIT_WORK(&s->work, __sleep_work);
                queue_work(i915->wq, &s->work);
@@ -3646,6 +3675,7 @@ out_rearm:
        if (same_epoch(dev_priv, epoch)) {
                struct sleep_rcu_work *s = kmalloc(sizeof(*s), GFP_KERNEL);
                if (s) {
+                       init_rcu_head(&s->rcu);
                        s->i915 = dev_priv;
                        s->epoch = epoch;
                        call_rcu(&s->rcu, __sleep_rcu);
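
Note: both ends of the sleep_rcu_work lifecycle are now annotated for
CONFIG_DEBUG_OBJECTS_RCU_HEAD: init_rcu_head() when the head is set up, and
destroy_rcu_head() in the callback before the memory is reused as a work item.
A minimal sketch of the pattern, with a hypothetical structure:

    #include <linux/kernel.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct deferred { /* hypothetical */
            struct rcu_head rcu;
            int payload;
    };

    static void deferred_cb(struct rcu_head *rcu)
    {
            struct deferred *d = container_of(rcu, typeof(*d), rcu);

            /* the storage stops being an rcu_head here */
            destroy_rcu_head(&d->rcu);
            kfree(d);
    }

    static int deferred_submit(int payload)
    {
            struct deferred *d = kmalloc(sizeof(*d), GFP_KERNEL);

            if (!d)
                    return -ENOMEM;
            init_rcu_head(&d->rcu); /* arm debugobjects tracking */
            d->payload = payload;
            call_rcu(&d->rcu, deferred_cb);
            return 0;
    }
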
@@ -3743,7 +3773,9 @@ i915_gem_wait_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
        start = ktime_get();
 
        ret = i915_gem_object_wait(obj,
-                                  I915_WAIT_INTERRUPTIBLE | I915_WAIT_ALL,
+                                  I915_WAIT_INTERRUPTIBLE |
+                                  I915_WAIT_PRIORITY |
+                                  I915_WAIT_ALL,
                                   to_wait_timeout(args->timeout_ns),
                                   to_rps_client(file));
 
@@ -4710,6 +4742,8 @@ void i915_gem_object_init(struct drm_i915_gem_object *obj,
        INIT_LIST_HEAD(&obj->lut_list);
        INIT_LIST_HEAD(&obj->batch_pool_link);
 
+       init_rcu_head(&obj->rcu);
+
        obj->ops = ops;
 
        reservation_object_init(&obj->__builtin_resv);
@@ -4976,6 +5010,13 @@ static void __i915_gem_free_object_rcu(struct rcu_head *head)
                container_of(head, typeof(*obj), rcu);
        struct drm_i915_private *i915 = to_i915(obj->base.dev);
 
+       /*
+        * We reuse obj->rcu for the freed list, so we had better not treat
+        * it like an rcu_head from this point forwards. And we expect all
+        * objects to be freed via this path.
+        */
+       destroy_rcu_head(&obj->rcu);
+
        /*
         * Since we require blocking on struct_mutex to unbind the freed
         * object from the GPU before releasing resources back to the
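
Note: the object path gets the same annotations, and the comment explains why
they matter here: obj->rcu shares storage with the freed-list node, so the head
must be formally destroyed before those bytes are reinterpreted. A sketch of
that reuse with an illustrative union (the real struct drm_i915_gem_object lays
this out in its own way):

    #include <linux/kernel.h>
    #include <linux/llist.h>
    #include <linux/rcupdate.h>

    struct freed_obj { /* illustrative layout */
            union {
                    struct rcu_head rcu;     /* valid until the RCU callback runs */
                    struct llist_node freed; /* valid once the head is destroyed */
            };
    };

    static LLIST_HEAD(free_list);

    static void freed_obj_rcu(struct rcu_head *head)
    {
            struct freed_obj *obj = container_of(head, typeof(*obj), rcu);

            destroy_rcu_head(&obj->rcu);    /* storage now belongs to 'freed' */
            llist_add(&obj->freed, &free_list);
    }
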
@@ -5293,18 +5334,6 @@ int i915_gem_init_hw(struct drm_i915_private *dev_priv)
                I915_WRITE(MI_PREDICATE_RESULT_2, IS_HSW_GT3(dev_priv) ?
                           LOWER_SLICE_ENABLED : LOWER_SLICE_DISABLED);
 
-       if (HAS_PCH_NOP(dev_priv)) {
-               if (IS_IVYBRIDGE(dev_priv)) {
-                       u32 temp = I915_READ(GEN7_MSG_CTL);
-                       temp &= ~(WAIT_FOR_PCH_FLR_ACK | WAIT_FOR_PCH_RESET_ACK);
-                       I915_WRITE(GEN7_MSG_CTL, temp);
-               } else if (INTEL_GEN(dev_priv) >= 7) {
-                       u32 temp = I915_READ(HSW_NDE_RSTWRN_OPT);
-                       temp &= ~RESET_PCH_HANDSHAKE_ENABLE;
-                       I915_WRITE(HSW_NDE_RSTWRN_OPT, temp);
-               }
-       }
-
        intel_gt_workarounds_apply(dev_priv);
 
        i915_gem_init_swizzling(dev_priv);
@@ -5951,7 +5980,7 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
         * the bits.
         */
        BUILD_BUG_ON(INTEL_FRONTBUFFER_BITS_PER_PIPE * I915_MAX_PIPES >
-                    sizeof(atomic_t) * BITS_PER_BYTE);
+                    BITS_PER_TYPE(atomic_t));
 
        if (old) {
                WARN_ON(!(atomic_read(&old->frontbuffer_bits) & frontbuffer_bits));
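
Note: the BUILD_BUG_ON() is unchanged in meaning. BITS_PER_TYPE() is the
kernel's shorthand for the open-coded expression it replaces:

    /* Kernel header definition the cleanup relies on:
     *
     *   #define BITS_PER_TYPE(type) (sizeof(type) * BITS_PER_BYTE)
     *
     * so BITS_PER_TYPE(atomic_t) == sizeof(atomic_t) * BITS_PER_BYTE and
     * the compile-time check is semantically identical to before.
     */
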