/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"

#include "i915_drv.h"
#include "i915_globals.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_ring.h"
static struct i915_global_context {
	struct i915_global base;
	struct kmem_cache *slab_ce;
} global;
static struct intel_context *intel_context_alloc(void)
{
	return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}
void intel_context_free(struct intel_context *ce)
{
	kmem_cache_free(global.slab_ce, ce);
}
struct intel_context *
intel_context_create(struct intel_engine_cs *engine)
{
	struct intel_context *ce;

	ce = intel_context_alloc();
	if (!ce)
		return ERR_PTR(-ENOMEM);

	intel_context_init(ce, engine);
	return ce;
}
int intel_context_alloc_state(struct intel_context *ce)
{
	int err = 0;

	if (mutex_lock_interruptible(&ce->pin_mutex))
		return -EINTR;

	if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
		if (intel_context_is_banned(ce)) {
			err = -EIO;
			goto unlock;
		}

		err = ce->ops->alloc(ce);
		if (unlikely(err))
			goto unlock;

		set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
	}

unlock:
	mutex_unlock(&ce->pin_mutex);
	return err;
}
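/*
 * First-pin bookkeeping for ce->active: grab a reference on the active
 * tracker and preallocate the engine barrier nodes (skipped for barrier
 * contexts), so that intel_context_active_release() can arm the barriers
 * without having to allocate.
 */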
static int intel_context_active_acquire(struct intel_context *ce)
{
	int err;

	__i915_active_acquire(&ce->active);

	if (intel_context_is_barrier(ce))
		return 0;

	/* Preallocate tracking nodes */
	err = i915_active_acquire_preallocate_barrier(&ce->active,
						      ce->engine);
	if (err)
		i915_active_release(&ce->active);

	return err;
}
static void intel_context_active_release(struct intel_context *ce)
{
	/* Nodes preallocated in intel_context_active() */
	i915_active_acquire_barrier(&ce->active);
	i915_active_release(&ce->active);
}
static int __context_pin_state(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
{
	unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
	int err;

	err = i915_ggtt_pin(vma, ww, 0, bias | PIN_HIGH);
	if (err)
		return err;

	err = i915_active_acquire(&vma->active);
	if (err)
		goto err_unpin;

	/*
	 * And mark it as a globally pinned object to let the shrinker know
	 * it cannot reclaim the object until we release it.
	 */
	i915_vma_make_unshrinkable(vma);
	vma->obj->mm.dirty = true;

	return 0;

err_unpin:
	i915_vma_unpin(vma);
	return err;
}
static void __context_unpin_state(struct i915_vma *vma)
{
	i915_vma_make_shrinkable(vma);
	i915_active_release(&vma->active);
	__i915_vma_unpin(vma);
}
static int __ring_active(struct intel_ring *ring,
			 struct i915_gem_ww_ctx *ww)
{
	int err;

	err = intel_ring_pin(ring, ww);
	if (err)
		return err;

	err = i915_active_acquire(&ring->vma->active);
	if (err)
		goto err_pin;

	return 0;

err_pin:
	intel_ring_unpin(ring);
	return err;
}
static void __ring_retire(struct intel_ring *ring)
{
	i915_active_release(&ring->vma->active);
	intel_ring_unpin(ring);
}
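/*
 * intel_context_pre_pin() takes the transient pins on the ring, the
 * timeline and (if present) the context state image under the caller's
 * ww acquire context; intel_context_post_unpin() is its exact inverse.
 */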
static int intel_context_pre_pin(struct intel_context *ce,
				 struct i915_gem_ww_ctx *ww)
{
	int err;

	CE_TRACE(ce, "active\n");

	err = __ring_active(ce->ring, ww);
	if (err)
		return err;

	err = intel_timeline_pin(ce->timeline, ww);
	if (err)
		goto err_ring;

	if (!ce->state)
		return 0;

	err = __context_pin_state(ce->state, ww);
	if (err)
		goto err_timeline;

	return 0;

err_timeline:
	intel_timeline_unpin(ce->timeline);
err_ring:
	__ring_retire(ce->ring);
	return err;
}
static void intel_context_post_unpin(struct intel_context *ce)
{
	if (ce->state)
		__context_unpin_state(ce->state);

	intel_timeline_unpin(ce->timeline);
	__ring_retire(ce->ring);
}
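/*
 * Pin the context for use on the GPU. The backing objects are locked
 * against the supplied ww acquire context and transiently pinned so that
 * the backend pre_pin/pin hooks can run under ce->pin_mutex; the
 * transient pins are always dropped again on the way out, leaving the
 * long-term pin with the ce->active tracker taken on first pin.
 */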
int __intel_context_do_pin_ww(struct intel_context *ce,
			      struct i915_gem_ww_ctx *ww)
{
	bool handoff = false;
	void *vaddr;
	int err = 0;

	if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
		err = intel_context_alloc_state(ce);
		if (err)
			return err;
	}

	/*
	 * We always pin the context/ring/timeline here, to ensure a pin
	 * refcount for __intel_context_active(), which prevents a lock
	 * inversion of ce->pin_mutex vs dma_resv_lock().
	 */

	err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww);
	if (!err && ce->ring->vma->obj)
		err = i915_gem_object_lock(ce->ring->vma->obj, ww);
	if (!err && ce->state)
		err = i915_gem_object_lock(ce->state->obj, ww);
	if (!err)
		err = intel_context_pre_pin(ce, ww);
	if (err)
		return err;

	err = i915_active_acquire(&ce->active);
	if (err)
		goto err_ctx_unpin;

	err = ce->ops->pre_pin(ce, ww, &vaddr);
	if (err)
		goto err_release;

	err = mutex_lock_interruptible(&ce->pin_mutex);
	if (err)
		goto err_post_unpin;

	if (unlikely(intel_context_is_closed(ce))) {
		err = -ENOENT;
		goto err_unlock;
	}

	if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
		err = intel_context_active_acquire(ce);
		if (unlikely(err))
			goto err_unlock;

		err = ce->ops->pin(ce, vaddr);
		if (err) {
			intel_context_active_release(ce);
			goto err_unlock;
		}

		CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n",
			 i915_ggtt_offset(ce->ring->vma),
			 ce->ring->head, ce->ring->tail);

		handoff = true;
		smp_mb__before_atomic(); /* flush pin before it is visible */
		atomic_inc(&ce->pin_count);
	}

	GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

err_unlock:
	mutex_unlock(&ce->pin_mutex);
err_post_unpin:
	if (!handoff)
		ce->ops->post_unpin(ce);
err_release:
	i915_active_release(&ce->active);
err_ctx_unpin:
	intel_context_post_unpin(ce);

	/*
	 * Unlock the hwsp_ggtt object since it's shared.
	 * In principle we can unlock all the global state locked above
	 * since it's pinned and doesn't need fencing, and will
	 * thus remain resident until it is explicitly unpinned.
	 */
	i915_gem_ww_unlock_single(ce->timeline->hwsp_ggtt->obj);

	return err;
}
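/*
 * Wrapper around __intel_context_do_pin_ww() that supplies a local ww
 * acquire context and handles the -EDEADLK backoff/retry dance itself.
 */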
int __intel_context_do_pin(struct intel_context *ce)
{
	struct i915_gem_ww_ctx ww;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = __intel_context_do_pin_ww(ce, &ww);
	if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
	}
	i915_gem_ww_ctx_fini(&ww);
	return err;
}
void intel_context_unpin(struct intel_context *ce)
{
	if (!atomic_dec_and_test(&ce->pin_count))
		return;

	CE_TRACE(ce, "unpin\n");
	ce->ops->unpin(ce);
	ce->ops->post_unpin(ce);

	/*
	 * Once released, we may asynchronously drop the active reference.
	 * As that may be the only reference keeping the context alive,
	 * take an extra reference now so that it is not freed before we
	 * finish dereferencing it.
	 */
	intel_context_get(ce);
	intel_context_active_release(ce);
	intel_context_put(ce);
}
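/*
 * __intel_context_active() and __intel_context_retire() are the callbacks
 * installed by i915_active_init() in intel_context_init(): the former takes
 * the long-term pins when the context first becomes active, the latter drops
 * them (and the self-reference) once the last request has been retired.
 */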
static void __intel_context_retire(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	CE_TRACE(ce, "retire runtime: { total:%lluns, avg:%lluns }\n",
		 intel_context_get_total_runtime_ns(ce),
		 intel_context_get_avg_runtime_ns(ce));

	set_bit(CONTEXT_VALID_BIT, &ce->flags);
	intel_context_post_unpin(ce);
	intel_context_put(ce);
}
static int __intel_context_active(struct i915_active *active)
{
	struct intel_context *ce = container_of(active, typeof(*ce), active);

	intel_context_get(ce);

	/* everything should already be activated by intel_context_pre_pin() */
	GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->ring->vma->active));
	__intel_ring_pin(ce->ring);

	__intel_timeline_pin(ce->timeline);

	if (ce->state) {
		GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->state->active));
		__i915_vma_pin(ce->state);
		i915_vma_make_unshrinkable(ce->state);
	}

	return 0;
}
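/*
 * Note that ce->ring only encodes the default ring size here
 * (__intel_context_ring_size(SZ_4K)); the real ring is not created until
 * the backend ->alloc() hook runs via intel_context_alloc_state().
 */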
void
intel_context_init(struct intel_context *ce,
		   struct intel_engine_cs *engine)
{
	GEM_BUG_ON(!engine->cops);
	GEM_BUG_ON(!engine->gt->vm);

	kref_init(&ce->ref);

	ce->engine = engine;
	ce->ops = engine->cops;
	ce->sseu = engine->sseu;
	ce->ring = __intel_context_ring_size(SZ_4K);

	ewma_runtime_init(&ce->runtime.avg);

	ce->vm = i915_vm_get(engine->gt->vm);

	INIT_LIST_HEAD(&ce->signal_link);
	INIT_LIST_HEAD(&ce->signals);

	mutex_init(&ce->pin_mutex);

	i915_active_init(&ce->active,
			 __intel_context_active, __intel_context_retire);
}
void intel_context_fini(struct intel_context *ce)
{
	if (ce->timeline)
		intel_timeline_put(ce->timeline);
	i915_vm_put(ce->vm);

	mutex_destroy(&ce->pin_mutex);
	i915_active_fini(&ce->active);
}
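/*
 * The intel_context slab cache is managed through the i915_globals
 * machinery: i915_global_context_init() creates it, and the registered
 * shrink/exit callbacks below trim and destroy it respectively.
 */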
static void i915_global_context_shrink(void)
{
	kmem_cache_shrink(global.slab_ce);
}
static void i915_global_context_exit(void)
{
	kmem_cache_destroy(global.slab_ce);
}
static struct i915_global_context global = { {
	.shrink = i915_global_context_shrink,
	.exit = i915_global_context_exit,
} };
int __init i915_global_context_init(void)
{
	global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
	if (!global.slab_ce)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}
void intel_context_enter_engine(struct intel_context *ce)
{
	intel_engine_pm_get(ce->engine);
	intel_timeline_enter(ce->timeline);
}
void intel_context_exit_engine(struct intel_context *ce)
{
	intel_timeline_exit(ce->timeline);
	intel_engine_pm_put(ce->engine);
}
int intel_context_prepare_remote_request(struct intel_context *ce,
					  struct i915_request *rq)
{
	struct intel_timeline *tl = ce->timeline;
	int err;

	/* Only suitable for use in remotely modifying this context */
	GEM_BUG_ON(rq->context == ce);

	if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
		/* Queue this switch after current activity by this context. */
		err = i915_active_fence_set(&tl->last_request, rq);
		if (err)
			return err;
	}

	/*
	 * Guarantee that the context image and the timeline remain pinned
	 * until the modifying request is retired, by tracking the request
	 * on the ce activity tracker.
	 *
	 * We only need to take one pin on its account; in other words, we
	 * transfer the pinned ce object to the tracked active request.
	 */
	GEM_BUG_ON(i915_active_is_idle(&ce->active));
	return i915_active_add_request(&ce->active, rq);
}
struct i915_request *intel_context_create_request(struct intel_context *ce)
{
	struct i915_gem_ww_ctx ww;
	struct i915_request *rq;
	int err;

	i915_gem_ww_ctx_init(&ww, true);
retry:
	err = intel_context_pin_ww(ce, &ww);
	if (!err) {
		rq = i915_request_create(ce);
		intel_context_unpin(ce);
	} else if (err == -EDEADLK) {
		err = i915_gem_ww_ctx_backoff(&ww);
		if (!err)
			goto retry;
		rq = ERR_PTR(err);
	} else {
		rq = ERR_PTR(err);
	}

	i915_gem_ww_ctx_fini(&ww);

	if (IS_ERR(rq))
		return rq;

	/*
	 * timeline->mutex should be the inner lock, but is used as outer lock.
	 * Hack around this to shut up lockdep in selftests.
	 */
	lockdep_unpin_lock(&ce->timeline->mutex, rq->cookie);
	mutex_release(&ce->timeline->mutex.dep_map, _RET_IP_);
	mutex_acquire(&ce->timeline->mutex.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
	rq->cookie = lockdep_pin_lock(&ce->timeline->mutex);

	return rq;
}
#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif