drm/i915: Lift timeline into intel_context
author    Chris Wilson <chris@chris-wilson.co.uk>
Fri, 9 Aug 2019 18:25:18 +0000 (19:25 +0100)
committer Chris Wilson <chris@chris-wilson.co.uk>
Fri, 9 Aug 2019 19:18:30 +0000 (20:18 +0100)
Move the timeline from inside the intel_ring into the intel_context
itself. This removes a lot of pointer dancing and makes the relationship
between a context and its timeline much clearer.
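
The practical effect is one less pointer hop on every timeline access.
A hand-written before/after sketch for illustration only (the helper
names are made up and not part of the patch; the access pattern is the
ce->ring->timeline -> ce->timeline change seen throughout the hunks
below):

	/* Before this patch: the timeline was only reachable via the ring. */
	static struct intel_timeline *old_context_timeline(struct intel_context *ce)
	{
		return ce->ring->timeline;
	}

	/* After this patch: the context holds its own timeline pointer. */
	static struct intel_timeline *new_context_timeline(struct intel_context *ce)
	{
		return ce->timeline;
	}

Since intel_context now takes its own reference (intel_timeline_get() in
intel_context_init(), intel_timeline_put() in intel_context_fini()), the
timeline's lifetime follows the context rather than whichever ring the
context happens to run on.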

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190809182518.20486-4-chris@chris-wilson.co.uk
15 files changed:
drivers/gpu/drm/i915/gem/i915_gem_context.c
drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
drivers/gpu/drm/i915/gt/intel_context.c
drivers/gpu/drm/i915/gt/intel_context.h
drivers/gpu/drm/i915/gt/intel_context_types.h
drivers/gpu/drm/i915/gt/intel_engine.h
drivers/gpu/drm/i915/gt/intel_engine_cs.c
drivers/gpu/drm/i915/gt/intel_engine_types.h
drivers/gpu/drm/i915/gt/intel_lrc.c
drivers/gpu/drm/i915/gt/intel_ringbuffer.c
drivers/gpu/drm/i915/gt/mock_engine.c
drivers/gpu/drm/i915/gt/selftest_context.c
drivers/gpu/drm/i915/i915_active.c
drivers/gpu/drm/i915/i915_request.c
drivers/gpu/drm/i915/selftests/i915_gem_evict.c

diff --git a/drivers/gpu/drm/i915/gem/i915_gem_context.c b/drivers/gpu/drm/i915/gem/i915_gem_context.c
index d1952637cd91ed64045a05767052856ab5eb7276..a6b0cb714292d3252fb067319beb5adbef6d58f9 100644
@@ -489,6 +489,29 @@ static void __assign_ppgtt(struct i915_gem_context *ctx,
                i915_vm_put(vm);
 }
 
+static void __set_timeline(struct intel_timeline **dst,
+                          struct intel_timeline *src)
+{
+       struct intel_timeline *old = *dst;
+
+       *dst = src ? intel_timeline_get(src) : NULL;
+
+       if (old)
+               intel_timeline_put(old);
+}
+
+static void __apply_timeline(struct intel_context *ce, void *timeline)
+{
+       __set_timeline(&ce->timeline, timeline);
+}
+
+static void __assign_timeline(struct i915_gem_context *ctx,
+                             struct intel_timeline *timeline)
+{
+       __set_timeline(&ctx->timeline, timeline);
+       context_apply_all(ctx, __apply_timeline, timeline);
+}
+
 static struct i915_gem_context *
 i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
 {
@@ -531,7 +554,8 @@ i915_gem_create_context(struct drm_i915_private *dev_priv, unsigned int flags)
                        return ERR_CAST(timeline);
                }
 
-               ctx->timeline = timeline;
+               __assign_timeline(ctx, timeline);
+               intel_timeline_put(timeline);
        }
 
        trace_i915_context_create(ctx);
@@ -1931,13 +1955,8 @@ unlock:
 static int clone_timeline(struct i915_gem_context *dst,
                          struct i915_gem_context *src)
 {
-       if (src->timeline) {
-               GEM_BUG_ON(src->timeline == dst->timeline);
-
-               if (dst->timeline)
-                       intel_timeline_put(dst->timeline);
-               dst->timeline = intel_timeline_get(src->timeline);
-       }
+       if (src->timeline)
+               __assign_timeline(dst, src->timeline);
 
        return 0;
 }
diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c
index 2fa08357944ee22660e8bce10a533d263f4e6c3d..1bd2187ac8d65cdd57b8679ea18c7430e499f0cd 100644
@@ -2182,7 +2182,7 @@ err_unpin:
 static void eb_unpin_context(struct i915_execbuffer *eb)
 {
        struct intel_context *ce = eb->context;
-       struct intel_timeline *tl = ce->ring->timeline;
+       struct intel_timeline *tl = ce->timeline;
 
        mutex_lock(&tl->mutex);
        intel_context_exit(ce);
diff --git a/drivers/gpu/drm/i915/gt/intel_context.c b/drivers/gpu/drm/i915/gt/intel_context.c
index 6d1d4e8dbfc941926fba037737977ae69e913960..77833f1558a94e008b7268e3287db2dd44194c12 100644
@@ -68,7 +68,7 @@ int __intel_context_do_pin(struct intel_context *ce)
                        goto err;
 
                GEM_TRACE("%s context:%llx pin ring:{head:%04x, tail:%04x}\n",
-                         ce->engine->name, ce->ring->timeline->fence_context,
+                         ce->engine->name, ce->timeline->fence_context,
                          ce->ring->head, ce->ring->tail);
 
                i915_gem_context_get(ce->gem_context); /* for ctx->ppgtt */
@@ -98,7 +98,7 @@ void intel_context_unpin(struct intel_context *ce)
 
        if (likely(atomic_dec_and_test(&ce->pin_count))) {
                GEM_TRACE("%s context:%llx retire\n",
-                         ce->engine->name, ce->ring->timeline->fence_context);
+                         ce->engine->name, ce->timeline->fence_context);
 
                ce->ops->unpin(ce);
 
@@ -143,11 +143,12 @@ static void __intel_context_retire(struct i915_active *active)
        struct intel_context *ce = container_of(active, typeof(*ce), active);
 
        GEM_TRACE("%s context:%llx retire\n",
-                 ce->engine->name, ce->ring->timeline->fence_context);
+                 ce->engine->name, ce->timeline->fence_context);
 
        if (ce->state)
                __context_unpin_state(ce->state);
 
+       intel_timeline_unpin(ce->timeline);
        intel_ring_unpin(ce->ring);
        intel_context_put(ce);
 }
@@ -163,15 +164,21 @@ static int __intel_context_active(struct i915_active *active)
        if (err)
                goto err_put;
 
+       err = intel_timeline_pin(ce->timeline);
+       if (err)
+               goto err_ring;
+
        if (!ce->state)
                return 0;
 
        err = __context_pin_state(ce->state);
        if (err)
-               goto err_ring;
+               goto err_timeline;
 
        return 0;
 
+err_timeline:
+       intel_timeline_unpin(ce->timeline);
 err_ring:
        intel_ring_unpin(ce->ring);
 err_put:
@@ -218,6 +225,8 @@ intel_context_init(struct intel_context *ce,
 
        ce->gem_context = ctx;
        ce->vm = i915_vm_get(ctx->vm ?: &engine->gt->ggtt->vm);
+       if (ctx->timeline)
+               ce->timeline = intel_timeline_get(ctx->timeline);
 
        ce->engine = engine;
        ce->ops = engine->cops;
@@ -235,6 +244,8 @@ intel_context_init(struct intel_context *ce,
 
 void intel_context_fini(struct intel_context *ce)
 {
+       if (ce->timeline)
+               intel_timeline_put(ce->timeline);
        i915_vm_put(ce->vm);
 
        mutex_destroy(&ce->pin_mutex);
@@ -279,7 +290,7 @@ void intel_context_exit_engine(struct intel_context *ce)
 int intel_context_prepare_remote_request(struct intel_context *ce,
                                         struct i915_request *rq)
 {
-       struct intel_timeline *tl = ce->ring->timeline;
+       struct intel_timeline *tl = ce->timeline;
        int err;
 
        /* Only suitable for use in remotely modifying this context */
diff --git a/drivers/gpu/drm/i915/gt/intel_context.h b/drivers/gpu/drm/i915/gt/intel_context.h
index 13f28dd316bc95c9a1e3890eec7ac4d73a8a863b..9fa8b588f18ee71d902f5af6b8a524007e5096ab 100644
@@ -120,15 +120,15 @@ static inline void intel_context_put(struct intel_context *ce)
 
 static inline int __must_check
 intel_context_timeline_lock(struct intel_context *ce)
-       __acquires(&ce->ring->timeline->mutex)
+       __acquires(&ce->timeline->mutex)
 {
-       return mutex_lock_interruptible(&ce->ring->timeline->mutex);
+       return mutex_lock_interruptible(&ce->timeline->mutex);
 }
 
 static inline void intel_context_timeline_unlock(struct intel_context *ce)
-       __releases(&ce->ring->timeline->mutex)
+       __releases(&ce->timeline->mutex)
 {
-       mutex_unlock(&ce->ring->timeline->mutex);
+       mutex_unlock(&ce->timeline->mutex);
 }
 
 int intel_context_prepare_remote_request(struct intel_context *ce,
diff --git a/drivers/gpu/drm/i915/gt/intel_context_types.h b/drivers/gpu/drm/i915/gt/intel_context_types.h
index cff6238c213aba83d6b0617ab86250a14ad0a48f..a632b20ec4d87e6418685532d702020e04c5535d 100644
@@ -53,6 +53,7 @@ struct intel_context {
 
        struct i915_vma *state;
        struct intel_ring *ring;
+       struct intel_timeline *timeline;
 
        unsigned long flags;
 #define CONTEXT_ALLOC_BIT 0
diff --git a/drivers/gpu/drm/i915/gt/intel_engine.h b/drivers/gpu/drm/i915/gt/intel_engine.h
index 37c391cee4411f12d01e3a87a711cacd7c3c4a85..e1228b0e577f6a54aece12c2281bd148c5a87fc9 100644
@@ -196,9 +196,7 @@ intel_write_status_page(struct intel_engine_cs *engine, int reg, u32 value)
 #define CNL_HWS_CSB_WRITE_INDEX                0x2f
 
 struct intel_ring *
-intel_engine_create_ring(struct intel_engine_cs *engine,
-                        struct intel_timeline *timeline,
-                        int size);
+intel_engine_create_ring(struct intel_engine_cs *engine, int size);
 int intel_ring_pin(struct intel_ring *ring);
 void intel_ring_reset(struct intel_ring *ring, u32 tail);
 unsigned int intel_ring_update_space(struct intel_ring *ring);
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c
index ece731ab7b3a2c297480c29f20e7fb0a84d6fd77..7d174af30f8c212af5615802374ae6294363b260 100644
@@ -680,7 +680,6 @@ static int measure_breadcrumb_dw(struct intel_engine_cs *engine)
                goto out_frame;
 
        INIT_LIST_HEAD(&frame->ring.request_list);
-       frame->ring.timeline = &frame->timeline;
        frame->ring.vaddr = frame->cs;
        frame->ring.size = sizeof(frame->cs);
        frame->ring.effective_size = frame->ring.size;
diff --git a/drivers/gpu/drm/i915/gt/intel_engine_types.h b/drivers/gpu/drm/i915/gt/intel_engine_types.h
index c79fd1dafa885e92b86da6bdf6f2ad94b9fb7b5c..a0f372807dd4947d0caacb4efd86068cfc5135aa 100644
@@ -69,7 +69,6 @@ struct intel_ring {
        struct i915_vma *vma;
        void *vaddr;
 
-       struct intel_timeline *timeline;
        struct list_head request_list;
        struct list_head active_link;
 
@@ -286,8 +285,6 @@ struct intel_engine_cs {
 
        struct intel_sseu sseu;
 
-       struct intel_ring *buffer;
-
        struct {
                spinlock_t lock;
                struct list_head requests;
@@ -306,6 +303,11 @@ struct intel_engine_cs {
        struct drm_i915_gem_object *default_state;
        void *pinned_default_state;
 
+       struct {
+               struct intel_ring *ring;
+               struct intel_timeline *timeline;
+       } legacy;
+
        /* Rather than have every client wait upon all user interrupts,
         * with the herd waking after every interrupt and each doing the
         * heavyweight seqno dance, we delegate the task (of being the
diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c
index fc5fcc2d03accbc0b416973ec0feeb17a72ba963..bb74954889dd37901b7c07658050dcb7b3790477 100644
@@ -2821,9 +2821,6 @@ logical_ring_default_irqs(struct intel_engine_cs *engine)
 
 int intel_execlists_submission_setup(struct intel_engine_cs *engine)
 {
-       /* Intentionally left blank. */
-       engine->buffer = NULL;
-
        tasklet_init(&engine->execlists.tasklet,
                     execlists_submission_tasklet, (unsigned long)engine);
        timer_setup(&engine->execlists.timer, execlists_submission_timer, 0);
@@ -3071,23 +3068,13 @@ err_unpin_ctx:
        return ret;
 }
 
-static struct intel_timeline *
-get_timeline(struct i915_gem_context *ctx, struct intel_gt *gt)
-{
-       if (ctx->timeline)
-               return intel_timeline_get(ctx->timeline);
-       else
-               return intel_timeline_create(gt, NULL);
-}
-
 static int __execlists_context_alloc(struct intel_context *ce,
                                     struct intel_engine_cs *engine)
 {
        struct drm_i915_gem_object *ctx_obj;
+       struct intel_ring *ring;
        struct i915_vma *vma;
        u32 context_size;
-       struct intel_ring *ring;
-       struct intel_timeline *timeline;
        int ret;
 
        GEM_BUG_ON(ce->state);
@@ -3109,15 +3096,19 @@ static int __execlists_context_alloc(struct intel_context *ce,
                goto error_deref_obj;
        }
 
-       timeline = get_timeline(ce->gem_context, engine->gt);
-       if (IS_ERR(timeline)) {
-               ret = PTR_ERR(timeline);
-               goto error_deref_obj;
+       if (!ce->timeline) {
+               struct intel_timeline *tl;
+
+               tl = intel_timeline_create(engine->gt, NULL);
+               if (IS_ERR(tl)) {
+                       ret = PTR_ERR(tl);
+                       goto error_deref_obj;
+               }
+
+               ce->timeline = tl;
        }
 
-       ring = intel_engine_create_ring(engine, timeline,
-                                       (unsigned long)ce->ring);
-       intel_timeline_put(timeline);
+       ring = intel_engine_create_ring(engine, (unsigned long)ce->ring);
        if (IS_ERR(ring)) {
                ret = PTR_ERR(ring);
                goto error_deref_obj;
diff --git a/drivers/gpu/drm/i915/gt/intel_ringbuffer.c b/drivers/gpu/drm/i915/gt/intel_ringbuffer.c
index d5b7e4dde7626f58b31b1dccc8c373ffc7f2d4aa..dfacb265a995db17540fdd3a1916e12ef6c94c34 100644
@@ -636,7 +636,7 @@ static bool stop_ring(struct intel_engine_cs *engine)
 static int xcs_resume(struct intel_engine_cs *engine)
 {
        struct drm_i915_private *dev_priv = engine->i915;
-       struct intel_ring *ring = engine->buffer;
+       struct intel_ring *ring = engine->legacy.ring;
        int ret = 0;
 
        GEM_TRACE("%s: ring:{HEAD:%04x, TAIL:%04x}\n",
@@ -832,12 +832,12 @@ static void reset_ring(struct intel_engine_cs *engine, bool stalled)
                 */
                __i915_request_reset(rq, stalled);
 
-               GEM_BUG_ON(rq->ring != engine->buffer);
+               GEM_BUG_ON(rq->ring != engine->legacy.ring);
                head = rq->head;
        } else {
-               head = engine->buffer->tail;
+               head = engine->legacy.ring->tail;
        }
-       engine->buffer->head = intel_ring_wrap(engine->buffer, head);
+       engine->legacy.ring->head = intel_ring_wrap(engine->legacy.ring, head);
 
        spin_unlock_irqrestore(&engine->active.lock, flags);
 }
@@ -1192,10 +1192,6 @@ int intel_ring_pin(struct intel_ring *ring)
        if (atomic_fetch_inc(&ring->pin_count))
                return 0;
 
-       ret = intel_timeline_pin(ring->timeline);
-       if (ret)
-               goto err_unpin;
-
        flags = PIN_GLOBAL;
 
        /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
@@ -1208,7 +1204,7 @@ int intel_ring_pin(struct intel_ring *ring)
 
        ret = i915_vma_pin(vma, 0, 0, flags);
        if (unlikely(ret))
-               goto err_timeline;
+               goto err_unpin;
 
        if (i915_vma_is_map_and_fenceable(vma))
                addr = (void __force *)i915_vma_pin_iomap(vma);
@@ -1225,13 +1221,10 @@ int intel_ring_pin(struct intel_ring *ring)
        GEM_BUG_ON(ring->vaddr);
        ring->vaddr = addr;
 
-       GEM_TRACE("ring:%llx pin\n", ring->timeline->fence_context);
        return 0;
 
 err_ring:
        i915_vma_unpin(vma);
-err_timeline:
-       intel_timeline_unpin(ring->timeline);
 err_unpin:
        atomic_dec(&ring->pin_count);
        return ret;
@@ -1254,8 +1247,6 @@ void intel_ring_unpin(struct intel_ring *ring)
        if (!atomic_dec_and_test(&ring->pin_count))
                return;
 
-       GEM_TRACE("ring:%llx unpin\n", ring->timeline->fence_context);
-
        /* Discard any unused bytes beyond that submitted to hw. */
        intel_ring_reset(ring, ring->tail);
 
@@ -1270,8 +1261,6 @@ void intel_ring_unpin(struct intel_ring *ring)
 
        i915_vma_unpin(vma);
        i915_vma_make_purgeable(vma);
-
-       intel_timeline_unpin(ring->timeline);
 }
 
 static struct i915_vma *create_ring_vma(struct i915_ggtt *ggtt, int size)
@@ -1306,9 +1295,7 @@ err:
 }
 
 struct intel_ring *
-intel_engine_create_ring(struct intel_engine_cs *engine,
-                        struct intel_timeline *timeline,
-                        int size)
+intel_engine_create_ring(struct intel_engine_cs *engine, int size)
 {
        struct drm_i915_private *i915 = engine->i915;
        struct intel_ring *ring;
@@ -1323,7 +1310,6 @@ intel_engine_create_ring(struct intel_engine_cs *engine,
 
        kref_init(&ring->ref);
        INIT_LIST_HEAD(&ring->request_list);
-       ring->timeline = intel_timeline_get(timeline);
 
        ring->size = size;
        /* Workaround an erratum on the i830 which causes a hang if
@@ -1353,7 +1339,6 @@ void intel_ring_free(struct kref *ref)
        i915_vma_close(ring->vma);
        i915_vma_put(ring->vma);
 
-       intel_timeline_put(ring->timeline);
        kfree(ring);
 }
 
@@ -1485,8 +1470,9 @@ static int ring_context_alloc(struct intel_context *ce)
        struct intel_engine_cs *engine = ce->engine;
 
        /* One ringbuffer to rule them all */
-       GEM_BUG_ON(!engine->buffer);
-       ce->ring = engine->buffer;
+       GEM_BUG_ON(!engine->legacy.ring);
+       ce->ring = engine->legacy.ring;
+       ce->timeline = intel_timeline_get(engine->legacy.timeline);
 
        GEM_BUG_ON(ce->state);
        if (engine->context_size) {
@@ -2165,8 +2151,11 @@ static void ring_destroy(struct intel_engine_cs *engine)
 
        intel_engine_cleanup_common(engine);
 
-       intel_ring_unpin(engine->buffer);
-       intel_ring_put(engine->buffer);
+       intel_ring_unpin(engine->legacy.ring);
+       intel_ring_put(engine->legacy.ring);
+
+       intel_timeline_unpin(engine->legacy.timeline);
+       intel_timeline_put(engine->legacy.timeline);
 
        kfree(engine);
 }
@@ -2350,32 +2339,40 @@ int intel_ring_submission_init(struct intel_engine_cs *engine)
        }
        GEM_BUG_ON(timeline->has_initial_breadcrumb);
 
-       ring = intel_engine_create_ring(engine, timeline, SZ_16K);
-       intel_timeline_put(timeline);
+       err = intel_timeline_pin(timeline);
+       if (err)
+               goto err_timeline;
+
+       ring = intel_engine_create_ring(engine, SZ_16K);
        if (IS_ERR(ring)) {
                err = PTR_ERR(ring);
-               goto err;
+               goto err_timeline_unpin;
        }
 
        err = intel_ring_pin(ring);
        if (err)
                goto err_ring;
 
-       GEM_BUG_ON(engine->buffer);
-       engine->buffer = ring;
+       GEM_BUG_ON(engine->legacy.ring);
+       engine->legacy.ring = ring;
+       engine->legacy.timeline = timeline;
 
        err = intel_engine_init_common(engine);
        if (err)
-               goto err_unpin;
+               goto err_ring_unpin;
 
-       GEM_BUG_ON(ring->timeline->hwsp_ggtt != engine->status_page.vma);
+       GEM_BUG_ON(timeline->hwsp_ggtt != engine->status_page.vma);
 
        return 0;
 
-err_unpin:
+err_ring_unpin:
        intel_ring_unpin(ring);
 err_ring:
        intel_ring_put(ring);
+err_timeline_unpin:
+       intel_timeline_unpin(timeline);
+err_timeline:
+       intel_timeline_put(timeline);
 err:
        intel_engine_cleanup_common(engine);
        return err;
diff --git a/drivers/gpu/drm/i915/gt/mock_engine.c b/drivers/gpu/drm/i915/gt/mock_engine.c
index 79baed1aaf38a7018842955e426a902d6aa35663..a63dd8a42cd432acf52a90c462858bc5681115bf 100644
 #include "mock_engine.h"
 #include "selftests/mock_request.h"
 
-struct mock_ring {
-       struct intel_ring base;
-       struct intel_timeline timeline;
-};
-
 static void mock_timeline_pin(struct intel_timeline *tl)
 {
        tl->pin_count++;
@@ -51,36 +46,22 @@ static void mock_timeline_unpin(struct intel_timeline *tl)
 static struct intel_ring *mock_ring(struct intel_engine_cs *engine)
 {
        const unsigned long sz = PAGE_SIZE / 2;
-       struct mock_ring *ring;
+       struct intel_ring *ring;
 
        ring = kzalloc(sizeof(*ring) + sz, GFP_KERNEL);
        if (!ring)
                return NULL;
 
-       if (intel_timeline_init(&ring->timeline, engine->gt, NULL)) {
-               kfree(ring);
-               return NULL;
-       }
-
-       kref_init(&ring->base.ref);
-       ring->base.size = sz;
-       ring->base.effective_size = sz;
-       ring->base.vaddr = (void *)(ring + 1);
-       ring->base.timeline = &ring->timeline;
-       atomic_set(&ring->base.pin_count, 1);
-
-       INIT_LIST_HEAD(&ring->base.request_list);
-       intel_ring_update_space(&ring->base);
-
-       return &ring->base;
-}
+       kref_init(&ring->ref);
+       ring->size = sz;
+       ring->effective_size = sz;
+       ring->vaddr = (void *)(ring + 1);
+       atomic_set(&ring->pin_count, 1);
 
-static void mock_ring_free(struct intel_ring *base)
-{
-       struct mock_ring *ring = container_of(base, typeof(*ring), base);
+       INIT_LIST_HEAD(&ring->request_list);
+       intel_ring_update_space(ring);
 
-       intel_timeline_fini(&ring->timeline);
-       kfree(ring);
+       return ring;
 }
 
 static struct i915_request *first_request(struct mock_engine *engine)
@@ -131,7 +112,6 @@ static void hw_delay_complete(struct timer_list *t)
 
 static void mock_context_unpin(struct intel_context *ce)
 {
-       mock_timeline_unpin(ce->ring->timeline);
 }
 
 static void mock_context_destroy(struct kref *ref)
@@ -140,8 +120,10 @@ static void mock_context_destroy(struct kref *ref)
 
        GEM_BUG_ON(intel_context_is_pinned(ce));
 
-       if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags))
-               mock_ring_free(ce->ring);
+       if (test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
+               kfree(ce->ring);
+               mock_timeline_unpin(ce->timeline);
+       }
 
        intel_context_fini(ce);
        intel_context_free(ce);
@@ -153,19 +135,21 @@ static int mock_context_alloc(struct intel_context *ce)
        if (!ce->ring)
                return -ENOMEM;
 
+       GEM_BUG_ON(ce->timeline);
+       ce->timeline = intel_timeline_create(ce->engine->gt, NULL);
+       if (IS_ERR(ce->timeline)) {
+               kfree(ce->engine);
+               return PTR_ERR(ce->timeline);
+       }
+
+       mock_timeline_pin(ce->timeline);
+
        return 0;
 }
 
 static int mock_context_pin(struct intel_context *ce)
 {
-       int ret;
-
-       ret = intel_context_active_acquire(ce);
-       if (ret)
-               return ret;
-
-       mock_timeline_pin(ce->ring->timeline);
-       return 0;
+       return intel_context_active_acquire(ce);
 }
 
 static const struct intel_context_ops mock_context_ops = {
diff --git a/drivers/gpu/drm/i915/gt/selftest_context.c b/drivers/gpu/drm/i915/gt/selftest_context.c
index 6e7e9a6fd235467ec61ab101734dafd3830d94c3..da9c49e2adaf81c9d9361cf13a877a2112d2a23f 100644
@@ -32,7 +32,7 @@ static int request_sync(struct i915_request *rq)
 
 static int context_sync(struct intel_context *ce)
 {
-       struct intel_timeline *tl = ce->ring->timeline;
+       struct intel_timeline *tl = ce->timeline;
        int err = 0;
 
        do {
diff --git a/drivers/gpu/drm/i915/i915_active.c b/drivers/gpu/drm/i915/i915_active.c
index 1e09722b5317b547a7322e7ec4eef0adac0879c5..7698fcaa648a564d96f861ae94b5270284fb7e84 100644
@@ -246,7 +246,7 @@ static bool __active_del_barrier(struct i915_active *ref,
        struct llist_node *head = NULL, *tail = NULL;
        struct llist_node *pos, *next;
 
-       GEM_BUG_ON(node->timeline != engine->kernel_context->ring->timeline->fence_context);
+       GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);
 
        /*
         * Rebuild the llist excluding our node. We may perform this
@@ -568,7 +568,7 @@ int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
         * i915_active_acquire_barrier()
         */
        for_each_engine_masked(engine, i915, mask, tmp) {
-               u64 idx = engine->kernel_context->ring->timeline->fence_context;
+               u64 idx = engine->kernel_context->timeline->fence_context;
                struct active_node *node;
 
                node = reuse_idle_barrier(ref, idx);
@@ -665,7 +665,7 @@ void i915_request_add_active_barriers(struct i915_request *rq)
        struct llist_node *node, *next;
 
        GEM_BUG_ON(intel_engine_is_virtual(engine));
-       GEM_BUG_ON(rq->timeline != engine->kernel_context->ring->timeline);
+       GEM_BUG_ON(rq->timeline != engine->kernel_context->timeline);
 
        /*
         * Attach the list of proto-fences to the in-flight request such
diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c
index 9b2b18f0196b09f9ed51b728e609cb860befa96a..43175bada09ebdb60ca8660bca037785b3510567 100644
@@ -306,12 +306,12 @@ static bool i915_request_retire(struct i915_request *rq)
 
        local_irq_enable();
 
-       intel_context_exit(rq->hw_context);
-       intel_context_unpin(rq->hw_context);
-
        i915_request_remove_from_client(rq);
        list_del(&rq->link);
 
+       intel_context_exit(rq->hw_context);
+       intel_context_unpin(rq->hw_context);
+
        free_capture_list(rq);
        i915_sched_node_fini(&rq->sched);
        i915_request_put(rq);
@@ -608,7 +608,7 @@ out:
 struct i915_request *
 __i915_request_create(struct intel_context *ce, gfp_t gfp)
 {
-       struct intel_timeline *tl = ce->ring->timeline;
+       struct intel_timeline *tl = ce->timeline;
        struct i915_request *rq;
        u32 seqno;
        int ret;
@@ -760,7 +760,7 @@ i915_request_create(struct intel_context *ce)
                goto err_unlock;
 
        /* Check that we do not interrupt ourselves with a new request */
-       rq->cookie = lockdep_pin_lock(&ce->ring->timeline->mutex);
+       rq->cookie = lockdep_pin_lock(&ce->timeline->mutex);
 
        return rq;
 
diff --git a/drivers/gpu/drm/i915/selftests/i915_gem_evict.c b/drivers/gpu/drm/i915/selftests/i915_gem_evict.c
index b6449d0a8c17114bc8da47b764940a6cb424cfcb..cb30c669b1b79725a1449a95c08d5c8bd73b53db 100644
@@ -48,26 +48,29 @@ static int populate_ggtt(struct drm_i915_private *i915,
 {
        unsigned long unbound, bound, count;
        struct drm_i915_gem_object *obj;
-       u64 size;
 
        count = 0;
-       for (size = 0;
-            size + I915_GTT_PAGE_SIZE <= i915->ggtt.vm.total;
-            size += I915_GTT_PAGE_SIZE) {
+       do {
                struct i915_vma *vma;
 
                obj = i915_gem_object_create_internal(i915, I915_GTT_PAGE_SIZE);
                if (IS_ERR(obj))
                        return PTR_ERR(obj);
 
-               quirk_add(obj, objects);
-
                vma = i915_gem_object_ggtt_pin(obj, NULL, 0, 0, 0);
-               if (IS_ERR(vma))
+               if (IS_ERR(vma)) {
+                       i915_gem_object_put(obj);
+                       if (vma == ERR_PTR(-ENOSPC))
+                               break;
+
                        return PTR_ERR(vma);
+               }
 
+               quirk_add(obj, objects);
                count++;
-       }
+       } while (1);
+       pr_debug("Filled GGTT with %lu pages [%llu total]\n",
+                count, i915->ggtt.vm.total / PAGE_SIZE);
 
        bound = 0;
        unbound = 0;