/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/debugobjects.h>

#include "gt/intel_context.h"
#include "gt/intel_engine_heartbeat.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_active.h"

/*
 * Active refs memory management
 *
 * To be more economical with memory, we reap all the i915_active trees as
 * they idle (when we know the active requests are inactive) and allocate the
 * nodes from a local slab cache to hopefully reduce the fragmentation.
 */
static struct kmem_cache *slab_cache;

struct active_node {
	struct rb_node node;
	struct i915_active_fence base;
	struct i915_active *ref;
	u64 timeline;
};

#define fetch_node(x) rb_entry(READ_ONCE(x), typeof(struct active_node), node)

static inline struct active_node *
node_from_active(struct i915_active_fence *active)
{
	return container_of(active, struct active_node, base);
}

#define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)

static inline bool is_barrier(const struct i915_active_fence *active)
{
	return IS_ERR(rcu_access_pointer(active->fence));
}

static inline struct llist_node *barrier_to_ll(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return (struct llist_node *)&node->base.cb.node;
}

static inline struct intel_engine_cs *
__barrier_to_engine(struct active_node *node)
{
	return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
}

static inline struct intel_engine_cs *
barrier_to_engine(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return __barrier_to_engine(node);
}

static inline struct active_node *barrier_from_ll(struct llist_node *x)
{
	return container_of((struct list_head *)x,
			    struct active_node, base.cb.node);
}
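
/*
 * Barrier proto-nodes reuse the storage of an active_node: while the node is
 * an unconnected idle barrier, base.fence holds ERR_PTR(-EAGAIN) (see
 * is_barrier()), base.cb.node doubles as the llist_node queued on
 * engine->barrier_tasks, and base.cb.node.prev stashes the owning engine
 * (see __barrier_to_engine()). The casts above rely on that layout.
 */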

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)

static void *active_debug_hint(void *addr)
{
	struct i915_active *ref = addr;

	return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
}

static const struct debug_obj_descr active_debug_desc = {
	.name = "i915_active",
	.debug_hint = active_debug_hint,
};

static void debug_active_init(struct i915_active *ref)
{
	debug_object_init(ref, &active_debug_desc);
}

static void debug_active_activate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* before the first inc */
		debug_object_activate(ref, &active_debug_desc);
}

static void debug_active_deactivate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* after the last dec */
		debug_object_deactivate(ref, &active_debug_desc);
}

static void debug_active_fini(struct i915_active *ref)
{
	debug_object_free(ref, &active_debug_desc);
}

static void debug_active_assert(struct i915_active *ref)
{
	debug_object_assert_init(ref, &active_debug_desc);
}

#else

static inline void debug_active_init(struct i915_active *ref) { }
static inline void debug_active_activate(struct i915_active *ref) { }
static inline void debug_active_deactivate(struct i915_active *ref) { }
static inline void debug_active_fini(struct i915_active *ref) { }
static inline void debug_active_assert(struct i915_active *ref) { }

#endif

static void
__active_retire(struct i915_active *ref)
{
	struct rb_root root = RB_ROOT;
	struct active_node *it, *n;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* return the unused nodes to our slabcache -- flushing the allocator */
	if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
		return;

	GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
	debug_active_deactivate(ref);

	/* Even if we have not used the cache, we may still have a barrier */
	if (!ref->cache)
		ref->cache = fetch_node(ref->tree.rb_node);

	/* Keep the MRU cached node for reuse */
	if (ref->cache) {
		/* Discard all other nodes in the tree */
		rb_erase(&ref->cache->node, &ref->tree);
		root = ref->tree;

		/* Rebuild the tree with only the cached node */
		rb_link_node(&ref->cache->node, NULL, &ref->tree.rb_node);
		rb_insert_color(&ref->cache->node, &ref->tree);
		GEM_BUG_ON(ref->tree.rb_node != &ref->cache->node);

		/* Make the cached node available for reuse with any timeline */
		ref->cache->timeline = 0; /* needs cmpxchg(u64) */
	}

	spin_unlock_irqrestore(&ref->tree_lock, flags);

	/* After the final retire, the entire struct may be freed */
	if (ref->retire)
		ref->retire(ref);

	/* ... except if you wait on it, you must manage your own references! */
	wake_up_var(ref);

	/* Finally free the discarded timeline tree */
	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
		GEM_BUG_ON(i915_active_fence_isset(&it->base));
		kmem_cache_free(slab_cache, it);
	}
}

static void
active_work(struct work_struct *wrk)
{
	struct i915_active *ref = container_of(wrk, typeof(*ref), work);

	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	__active_retire(ref);
}

static void
active_retire(struct i915_active *ref)
{
	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
		queue_work(system_unbound_wq, &ref->work);
		return;
	}

	__active_retire(ref);
}

static inline struct dma_fence **
__active_fence_slot(struct i915_active_fence *active)
{
	return (struct dma_fence ** __force)&active->fence;
}

static inline bool
active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_active_fence *active =
		container_of(cb, typeof(*active), cb);

	return cmpxchg(__active_fence_slot(active), fence, NULL) == fence;
}

static void
node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct active_node, base.cb)->ref);
}

static void
excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct i915_active, excl.cb));
}
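
/*
 * Retirement path: when a tracked fence signals, its callback lands in
 * active_fence_cb(), which clears the fence slot; node_retire()/excl_retire()
 * then drop one reference via active_retire(), and the final decrement
 * funnels into __active_retire() (directly, or via ref->work if the retire
 * callback may sleep) to prune the tree and invoke ref->retire().
 */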

static struct active_node *__active_lookup(struct i915_active *ref, u64 idx)
{
	struct active_node *it;

	GEM_BUG_ON(idx == 0); /* 0 is the unordered timeline, rsvd for cache */

	/*
	 * We track the most recently used timeline to skip a rbtree search
	 * for the common case, under typical loads we never need the rbtree
	 * at all. We can reuse the last slot if it is empty, that is
	 * after the previous activity has been retired, or if it matches the
	 * current timeline.
	 */
	it = READ_ONCE(ref->cache);
	if (it) {
		u64 cached = READ_ONCE(it->timeline);

		/* Once claimed, this slot will only belong to this idx */
		if (cached == idx)
			return it;

		/*
		 * An unclaimed cache [.timeline=0] can only be claimed once.
		 *
		 * If the value is already non-zero, some other thread has
		 * claimed the cache and we know that it does not match our
		 * idx. If, and only if, the timeline is currently zero is it
		 * worth competing to claim it atomically for ourselves (for
		 * only the winner of that race will cmpxchg return the old
		 * value of 0).
		 */
		if (!cached && !cmpxchg64(&it->timeline, 0, idx))
			return it;
	}

	BUILD_BUG_ON(offsetof(typeof(*it), node)); /* rb_node must be first */

	/* While active, the tree can only be built; not destroyed */
	GEM_BUG_ON(i915_active_is_idle(ref));

	it = fetch_node(ref->tree.rb_node);
	while (it) {
		if (it->timeline < idx) {
			it = fetch_node(it->node.rb_right);
		} else if (it->timeline > idx) {
			it = fetch_node(it->node.rb_left);
		} else {
			WRITE_ONCE(ref->cache, it);
			break;
		}
	}

	/* NB: If the tree rotated beneath us, we may miss our target. */
	return it;
}

static struct i915_active_fence *
active_instance(struct i915_active *ref, u64 idx)
{
	struct active_node *node;
	struct rb_node **p, *parent;

	node = __active_lookup(ref, idx);
	if (likely(node))
		return &node->base;

	spin_lock_irq(&ref->tree_lock);
	GEM_BUG_ON(i915_active_is_idle(ref));

	parent = NULL;
	p = &ref->tree.rb_node;
	while (*p) {
		parent = *p;

		node = rb_entry(parent, struct active_node, node);
		if (node->timeline == idx)
			goto out;

		if (node->timeline < idx)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	/*
	 * XXX: We should preallocate this before i915_active_ref() is ever
	 * called, but we cannot call into fs_reclaim() anyway, so use GFP_ATOMIC.
	 */
	node = kmem_cache_alloc(slab_cache, GFP_ATOMIC);
	if (!node)
		goto out;

	__i915_active_fence_init(&node->base, NULL, node_retire);
	node->ref = ref;
	node->timeline = idx;

	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, &ref->tree);

out:
	WRITE_ONCE(ref->cache, node);
	spin_unlock_irq(&ref->tree_lock);

	return node ? &node->base : NULL;
}

void __i915_active_init(struct i915_active *ref,
			int (*active)(struct i915_active *ref),
			void (*retire)(struct i915_active *ref),
			unsigned long flags,
			struct lock_class_key *mkey,
			struct lock_class_key *wkey)
{
	debug_active_init(ref);

	ref->flags = flags;
	ref->active = active;
	ref->retire = retire;

	spin_lock_init(&ref->tree_lock);
	ref->tree = RB_ROOT;
	ref->cache = NULL;

	init_llist_head(&ref->preallocated_barriers);
	atomic_set(&ref->count, 0);
	__mutex_init(&ref->mutex, "i915_active", mkey);
	__i915_active_fence_init(&ref->excl, NULL, excl_retire);
	INIT_WORK(&ref->work, active_work);
#if IS_ENABLED(CONFIG_LOCKDEP)
	lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
#endif
}
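
/*
 * Rough usage sketch (caller-side code; the names "tracker" and "rq" are
 * illustrative rather than taken from this file): a tracker is initialised
 * once, requests are added while it is in use, and it must be idle before it
 * is finalised.
 *
 *	i915_active_init(&tracker, NULL, NULL, 0);
 *	...
 *	err = i915_active_add_request(&tracker, rq);
 *	...
 *	__i915_active_wait(&tracker, TASK_INTERRUPTIBLE);
 *	i915_active_fini(&tracker);
 */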

static bool ____active_del_barrier(struct i915_active *ref,
				   struct active_node *node,
				   struct intel_engine_cs *engine)
{
	struct llist_node *head = NULL, *tail = NULL;
	struct llist_node *pos, *next;

	GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);

	/*
	 * Rebuild the llist excluding our node. We may perform this
	 * outside of the kernel_context timeline mutex and so someone
	 * else may be manipulating the engine->barrier_tasks, in
	 * which case either we or they will be upset :)
	 *
	 * A second __active_del_barrier() will report failure to claim
	 * the active_node and the caller will just shrug and know not to
	 * claim ownership of its node.
	 *
	 * A concurrent i915_request_add_active_barriers() will miss adding
	 * any of the tasks, but we will try again on the next -- and since
	 * we are actively using the barrier, we know that there will be
	 * at least another opportunity when we idle.
	 */
	llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) {
		if (node == barrier_from_ll(pos)) {
			node = NULL;
			continue;
		}

		pos->next = head;
		head = pos;
		if (!tail)
			tail = pos;
	}
	if (head)
		llist_add_batch(head, tail, &engine->barrier_tasks);

	return !node;
}

static bool
__active_del_barrier(struct i915_active *ref, struct active_node *node)
{
	return ____active_del_barrier(ref, node, barrier_to_engine(node));
}

static bool
replace_barrier(struct i915_active *ref, struct i915_active_fence *active)
{
	if (!is_barrier(active)) /* proto-node used by our idle barrier? */
		return false;

	/*
	 * This request is on the kernel_context timeline, and so
	 * we can use it to substitute for the pending idle-barrier
	 * request that we want to emit on the kernel_context.
	 */
	__active_del_barrier(ref, node_from_active(active));
	return true;
}

int i915_active_add_request(struct i915_active *ref, struct i915_request *rq)
{
	struct dma_fence *fence = &rq->fence;
	struct i915_active_fence *active;
	int err;

	/* Prevent reaping in case we malloc/wait while building the tree */
	err = i915_active_acquire(ref);
	if (err)
		return err;

	active = active_instance(ref, i915_request_timeline(rq)->fence_context);
	if (!active) {
		err = -ENOMEM;
		goto out;
	}

	if (replace_barrier(ref, active)) {
		RCU_INIT_POINTER(active->fence, NULL);
		atomic_dec(&ref->count);
	}
	if (!__i915_active_fence_set(active, fence))
		__i915_active_acquire(ref);

out:
	i915_active_release(ref);

	return err;
}

static struct dma_fence *
__i915_active_set_fence(struct i915_active *ref,
			struct i915_active_fence *active,
			struct dma_fence *fence)
{
	struct dma_fence *prev;

	if (replace_barrier(ref, active)) {
		RCU_INIT_POINTER(active->fence, fence);
		return NULL;
	}

	rcu_read_lock();
	prev = __i915_active_fence_set(active, fence);
	if (prev)
		prev = dma_fence_get_rcu(prev);
	else
		__i915_active_acquire(ref);
	rcu_read_unlock();

	return prev;
}

struct dma_fence *
i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
{
	/* We expect the caller to manage the exclusive timeline ordering */
	return __i915_active_set_fence(ref, &ref->excl, f);
}

bool i915_active_acquire_if_busy(struct i915_active *ref)
{
	debug_active_assert(ref);
	return atomic_add_unless(&ref->count, 1, 0);
}

static void __i915_active_activate(struct i915_active *ref)
{
	spin_lock_irq(&ref->tree_lock); /* __active_retire() */
	if (!atomic_fetch_inc(&ref->count))
		debug_active_activate(ref);
	spin_unlock_irq(&ref->tree_lock);
}

int i915_active_acquire(struct i915_active *ref)
{
	int err;

	if (i915_active_acquire_if_busy(ref))
		return 0;

	if (!ref->active) {
		__i915_active_activate(ref);
		return 0;
	}

	err = mutex_lock_interruptible(&ref->mutex);
	if (err)
		return err;

	if (likely(!i915_active_acquire_if_busy(ref))) {
		err = ref->active(ref);
		if (!err)
			__i915_active_activate(ref);
	}

	mutex_unlock(&ref->mutex);

	return err;
}

int i915_active_acquire_for_context(struct i915_active *ref, u64 idx)
{
	struct i915_active_fence *active;
	int err;

	err = i915_active_acquire(ref);
	if (err)
		return err;

	active = active_instance(ref, idx);
	if (!active) {
		i915_active_release(ref);
		return -ENOMEM;
	}

	return 0; /* return with active ref */
}

void i915_active_release(struct i915_active *ref)
{
	debug_active_assert(ref);
	active_retire(ref);
}

static void enable_signaling(struct i915_active_fence *active)
{
	struct dma_fence *fence;

	if (unlikely(is_barrier(active)))
		return;

	fence = i915_active_fence_get(active);
	if (!fence)
		return;

	dma_fence_enable_sw_signaling(fence);
	dma_fence_put(fence);
}

static int flush_barrier(struct active_node *it)
{
	struct intel_engine_cs *engine;

	if (likely(!is_barrier(&it->base)))
		return 0;

	engine = __barrier_to_engine(it);
	smp_rmb(); /* serialise with add_active_barriers */
	if (!is_barrier(&it->base))
		return 0;

	return intel_engine_flush_barriers(engine);
}

static int flush_lazy_signals(struct i915_active *ref)
{
	struct active_node *it, *n;
	int err = 0;

	enable_signaling(&ref->excl);
	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		err = flush_barrier(it); /* unconnected idle barrier? */
		if (err)
			break;

		enable_signaling(&it->base);
	}

	return err;
}

int __i915_active_wait(struct i915_active *ref, int state)
{
	might_sleep();

	/* Any fence added after the wait begins will not be auto-signaled */
	if (i915_active_acquire_if_busy(ref)) {
		int err;

		err = flush_lazy_signals(ref);
		i915_active_release(ref);
		if (err)
			return err;

		if (___wait_var_event(ref, i915_active_is_idle(ref),
				      state, 0, 0, schedule()))
			return -EINTR;
	}

	/*
	 * After the wait is complete, the caller may free the active.
	 * We have to flush any concurrent retirement before returning.
	 */
	flush_work(&ref->work);

	return 0;
}

static int __await_active(struct i915_active_fence *active,
			  int (*fn)(void *arg, struct dma_fence *fence),
			  void *arg)
{
	struct dma_fence *fence;

	if (is_barrier(active)) /* XXX flush the barrier? */
		return 0;

	fence = i915_active_fence_get(active);
	if (fence) {
		int err;

		err = fn(arg, fence);
		dma_fence_put(fence);
		if (err < 0)
			return err;
	}

	return 0;
}

struct wait_barrier {
	struct wait_queue_entry base;
	struct i915_active *ref;
};

static int
barrier_wake(wait_queue_entry_t *wq, unsigned int mode, int flags, void *key)
{
	struct wait_barrier *wb = container_of(wq, typeof(*wb), base);

	if (i915_active_is_idle(wb->ref)) {
		list_del(&wq->entry);
		i915_sw_fence_complete(wq->private);
		kfree(wq);
	}

	return 0;
}

static int __await_barrier(struct i915_active *ref, struct i915_sw_fence *fence)
{
	struct wait_barrier *wb;

	wb = kmalloc(sizeof(*wb), GFP_KERNEL);
	if (unlikely(!wb))
		return -ENOMEM;

	GEM_BUG_ON(i915_active_is_idle(ref));
	if (!i915_sw_fence_await(fence)) {
		kfree(wb);
		return -EINVAL;
	}

	wb->base.flags = 0;
	wb->base.func = barrier_wake;
	wb->base.private = fence;
	wb->ref = ref;

	add_wait_queue(__var_waitqueue(ref), &wb->base);
	return 0;
}

static int await_active(struct i915_active *ref,
			unsigned int flags,
			int (*fn)(void *arg, struct dma_fence *fence),
			void *arg, struct i915_sw_fence *barrier)
{
	int err = 0;

	if (!i915_active_acquire_if_busy(ref))
		return 0;

	if (flags & I915_ACTIVE_AWAIT_EXCL &&
	    rcu_access_pointer(ref->excl.fence)) {
		err = __await_active(&ref->excl, fn, arg);
		if (err)
			goto out;
	}

	if (flags & I915_ACTIVE_AWAIT_ACTIVE) {
		struct active_node *it, *n;

		rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
			err = __await_active(&it->base, fn, arg);
			if (err)
				goto out;
		}
	}

	if (flags & I915_ACTIVE_AWAIT_BARRIER) {
		err = flush_lazy_signals(ref);
		if (err)
			goto out;

		err = __await_barrier(ref, barrier);
		if (err)
			goto out;
	}

out:
	i915_active_release(ref);
	return err;
}

static int rq_await_fence(void *arg, struct dma_fence *fence)
{
	return i915_request_await_dma_fence(arg, fence);
}

int i915_request_await_active(struct i915_request *rq,
			      struct i915_active *ref,
			      unsigned int flags)
{
	return await_active(ref, flags, rq_await_fence, rq, &rq->submit);
}

static int sw_await_fence(void *arg, struct dma_fence *fence)
{
	return i915_sw_fence_await_dma_fence(arg, fence, 0,
					     GFP_NOWAIT | __GFP_NOWARN);
}

int i915_sw_fence_await_active(struct i915_sw_fence *fence,
			       struct i915_active *ref,
			       unsigned int flags)
{
	return await_active(ref, flags, sw_await_fence, fence, fence);
}

void i915_active_fini(struct i915_active *ref)
{
	debug_active_fini(ref);
	GEM_BUG_ON(atomic_read(&ref->count));
	GEM_BUG_ON(work_pending(&ref->work));
	mutex_destroy(&ref->mutex);

	if (ref->cache)
		kmem_cache_free(slab_cache, ref->cache);
}

static inline bool is_idle_barrier(struct active_node *node, u64 idx)
{
	return node->timeline == idx && !i915_active_fence_isset(&node->base);
}

static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
{
	struct rb_node *prev, *p;

	if (RB_EMPTY_ROOT(&ref->tree))
		return NULL;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Try to reuse any existing barrier nodes already allocated for this
	 * i915_active, due to overlapping active phases there is likely a
	 * node kept alive (as we reuse before parking). We prefer to reuse
	 * completely idle barriers (less hassle in manipulating the llists),
	 * but otherwise any will do.
	 */
	if (ref->cache && is_idle_barrier(ref->cache, idx)) {
		p = &ref->cache->node;
		goto match;
	}

	prev = NULL;
	p = ref->tree.rb_node;
	while (p) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);

		if (is_idle_barrier(node, idx))
			goto match;

		prev = p;
		if (node->timeline < idx)
			p = READ_ONCE(p->rb_right);
		else
			p = READ_ONCE(p->rb_left);
	}

	/*
	 * No quick match, but we did find the leftmost rb_node for the
	 * kernel_context. Walk the rb_tree in-order to see if there were
	 * any idle-barriers on this timeline that we missed, or just use
	 * the first pending barrier.
	 */
	for (p = prev; p; p = rb_next(p)) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);
		struct intel_engine_cs *engine;

		if (node->timeline > idx)
			break;

		if (node->timeline < idx)
			continue;

		if (is_idle_barrier(node, idx))
			goto match;

		/*
		 * The list of pending barriers is protected by the
		 * kernel_context timeline, which notably we do not hold
		 * here. i915_request_add_active_barriers() may consume
		 * the barrier before we claim it, so we have to check
		 * that we still own the barrier after the claim.
		 */
		engine = __barrier_to_engine(node);
		smp_rmb(); /* serialise with add_active_barriers */
		if (is_barrier(&node->base) &&
		    ____active_del_barrier(ref, node, engine))
			goto match;
	}

	return NULL;

match:
	spin_lock_irq(&ref->tree_lock);
	rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
	if (p == &ref->cache->node)
		WRITE_ONCE(ref->cache, NULL);
	spin_unlock_irq(&ref->tree_lock);

	return rb_entry(p, struct active_node, node);
}

int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
					    struct intel_engine_cs *engine)
{
	intel_engine_mask_t tmp, mask = engine->mask;
	struct llist_node *first = NULL, *last = NULL;
	struct intel_gt *gt = engine->gt;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* Wait until the previous preallocation is completed */
	while (!llist_empty(&ref->preallocated_barriers))
		cond_resched();

	/*
	 * Preallocate a node for each physical engine supporting the target
	 * engine (remember virtual engines have more than one sibling).
	 * We can then use the preallocated nodes in
	 * i915_active_acquire_barrier()
	 */
	for_each_engine_masked(engine, gt, mask, tmp) {
		u64 idx = engine->kernel_context->timeline->fence_context;
		struct llist_node *prev = first;
		struct active_node *node;

		rcu_read_lock();
		node = reuse_idle_barrier(ref, idx);
		rcu_read_unlock();
		if (!node) {
			node = kmem_cache_alloc(slab_cache, GFP_KERNEL);
			if (!node)
				goto unwind;

			RCU_INIT_POINTER(node->base.fence, NULL);
			node->base.cb.func = node_retire;
			node->timeline = idx;
			node->ref = ref;
		}

		if (!i915_active_fence_isset(&node->base)) {
			/*
			 * Mark this as being *our* unconnected proto-node.
			 *
			 * Since this node is not in any list, and we have
			 * decoupled it from the rbtree, we can reuse the
			 * request to indicate this is an idle-barrier node
			 * and then we can use the rb_node and list pointers
			 * for our tracking of the pending barrier.
			 */
			RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
			node->base.cb.node.prev = (void *)engine;
			__i915_active_acquire(ref);
		}
		GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));

		GEM_BUG_ON(barrier_to_engine(node) != engine);
		first = barrier_to_ll(node);
		first->next = prev;
		if (!last)
			last = first;
		intel_engine_pm_get(engine);
	}

	GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
	llist_add_batch(first, last, &ref->preallocated_barriers);

	return 0;

unwind:
	while (first) {
		struct active_node *node = barrier_from_ll(first);

		first = first->next;

		atomic_dec(&ref->count);
		intel_engine_pm_put(barrier_to_engine(node));

		kmem_cache_free(slab_cache, node);
	}
	return -ENOMEM;
}

void i915_active_acquire_barrier(struct i915_active *ref)
{
	struct llist_node *pos, *next;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Transfer the list of preallocated barriers into the
	 * i915_active rbtree, but only as proto-nodes. They will be
	 * populated by i915_request_add_active_barriers() to point to the
	 * request that will eventually release them.
	 */
	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
		struct active_node *node = barrier_from_ll(pos);
		struct intel_engine_cs *engine = barrier_to_engine(node);
		struct rb_node **p, *parent;

		spin_lock_irqsave_nested(&ref->tree_lock, flags,
					 SINGLE_DEPTH_NESTING);
		parent = NULL;
		p = &ref->tree.rb_node;
		while (*p) {
			struct active_node *it;

			parent = *p;

			it = rb_entry(parent, struct active_node, node);
			if (it->timeline < node->timeline)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&node->node, parent, p);
		rb_insert_color(&node->node, &ref->tree);
		spin_unlock_irqrestore(&ref->tree_lock, flags);

		GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
		llist_add(barrier_to_ll(node), &engine->barrier_tasks);
		intel_engine_pm_put_delay(engine, 2);
	}
}

static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
{
	return __active_fence_slot(&barrier_from_ll(node)->base);
}

void i915_request_add_active_barriers(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct llist_node *node, *next;
	unsigned long flags;

	GEM_BUG_ON(!intel_context_is_barrier(rq->context));
	GEM_BUG_ON(intel_engine_is_virtual(engine));
	GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);

	node = llist_del_all(&engine->barrier_tasks);
	if (!node)
		return;
	/*
	 * Attach the list of proto-fences to the in-flight request such
	 * that the parent i915_active will be released when this request
	 * is retired.
	 */
	spin_lock_irqsave(&rq->lock, flags);
	llist_for_each_safe(node, next, node) {
		/* serialise with reuse_idle_barrier */
		smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
		list_add_tail((struct list_head *)node, &rq->fence.cb_list);
	}
	spin_unlock_irqrestore(&rq->lock, flags);
}
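
/*
 * Barrier lifecycle, in order: i915_active_acquire_preallocate_barrier()
 * reserves one proto-node per physical engine, i915_active_acquire_barrier()
 * moves those nodes into the rbtree and onto engine->barrier_tasks, and
 * i915_request_add_active_barriers() finally binds them to a request on the
 * engine's kernel_context timeline so that retiring that request releases
 * the i915_active.
 */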

/*
 * __i915_active_fence_set: Update the last active fence along its timeline
 * @active: the active tracker
 * @fence: the new fence (under construction)
 *
 * Records the new @fence as the last active fence along its timeline in
 * this active tracker, moving the tracking callbacks from the previous
 * fence onto this one. Returns the previous fence (if not already completed),
 * which the caller must ensure is executed before the new fence. To ensure
 * that the order of fences within the timeline of the i915_active_fence is
 * understood, it should be locked by the caller.
 */
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
			struct dma_fence *fence)
{
	struct dma_fence *prev;
	unsigned long flags;

	if (fence == rcu_access_pointer(active->fence))
		return fence;

	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));

	/*
	 * Consider that we have two threads arriving (A and B), with
	 * C already resident as the active->fence.
	 *
	 * A does the xchg first, and so it sees C or NULL depending
	 * on the timing of the interrupt handler. If it is NULL, the
	 * previous fence must have been signaled and we know that
	 * we are first on the timeline. If it is still present,
	 * we acquire the lock on that fence and serialise with the interrupt
	 * handler, in the process removing it from any future interrupt
	 * callback. A will then wait on C before executing (if present).
	 *
	 * As B is second, it sees A as the previous fence and so waits for
	 * it to complete its transition and takes over the occupancy for
	 * itself -- remembering that it needs to wait on A before executing.
	 *
	 * Note the strong ordering of the timeline also provides consistent
	 * nesting rules for the fence->lock; the inner lock is always the
	 * older lock.
	 */
	spin_lock_irqsave(fence->lock, flags);
	prev = xchg(__active_fence_slot(active), fence);
	if (prev) {
		GEM_BUG_ON(prev == fence);
		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
		__list_del_entry(&active->cb.node);
		spin_unlock(prev->lock); /* serialise with prev->cb_list */
	}
	list_add_tail(&active->cb.node, &fence->cb_list);
	spin_unlock_irqrestore(fence->lock, flags);

	return prev;
}

int i915_active_fence_set(struct i915_active_fence *active,
			  struct i915_request *rq)
{
	struct dma_fence *fence;
	int err = 0;

	/* Must maintain timeline ordering wrt previous active requests */
	rcu_read_lock();
	fence = __i915_active_fence_set(active, &rq->fence);
	if (fence) /* but the previous fence may not belong to that timeline! */
		fence = dma_fence_get_rcu(fence);
	rcu_read_unlock();
	if (fence) {
		err = i915_request_await_dma_fence(rq, fence);
		dma_fence_put(fence);
	}

	return err;
}

void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	active_fence_cb(fence, cb);
}

struct auto_active {
	struct i915_active base;
	struct kref ref;
};

struct i915_active *i915_active_get(struct i915_active *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), base);

	kref_get(&aa->ref);
	return &aa->base;
}

static void auto_release(struct kref *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), ref);

	i915_active_fini(&aa->base);
	kfree(aa);
}

void i915_active_put(struct i915_active *ref)
{
	struct auto_active *aa = container_of(ref, typeof(*aa), base);

	kref_put(&aa->ref, auto_release);
}

static int auto_active(struct i915_active *ref)
{
	i915_active_get(ref);
	return 0;
}

static void auto_retire(struct i915_active *ref)
{
	i915_active_put(ref);
}

struct i915_active *i915_active_create(void)
{
	struct auto_active *aa;

	aa = kmalloc(sizeof(*aa), GFP_KERNEL);
	if (!aa)
		return NULL;

	kref_init(&aa->ref);
	i915_active_init(&aa->base, auto_active, auto_retire, 0);

	return &aa->base;
}
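
/*
 * Objects returned by i915_active_create() are reference counted: callers
 * pair i915_active_get() with i915_active_put(), while auto_active() and
 * auto_retire() keep the kref held for as long as the tracker itself has
 * outstanding activity.
 */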

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif

void i915_active_module_exit(void)
{
	kmem_cache_destroy(slab_cache);
}

int __init i915_active_module_init(void)
{
	slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
	if (!slab_cache)
		return -ENOMEM;

	return 0;
}