/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/debugobjects.h>

#include "gt/intel_engine_pm.h"

#include "i915_drv.h"
#include "i915_active.h"
#include "i915_globals.h"

#define BKL(ref) (&(ref)->i915->drm.struct_mutex)

/*
 * Active refs memory management
 *
 * To be more economical with memory, we reap all the i915_active trees as
 * they idle (when we know the active requests are inactive) and allocate the
 * nodes from a local slab cache to hopefully reduce the fragmentation.
 */
static struct i915_global_active {
	struct i915_global base;
	struct kmem_cache *slab_cache;
} global;

struct active_node {
	struct i915_active_request base;
	struct i915_active *ref;
	struct rb_node node;
	u64 timeline;
};

static inline struct active_node *
node_from_active(struct i915_active_request *active)
{
	return container_of(active, struct active_node, base);
}

#define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)

static inline bool is_barrier(const struct i915_active_request *active)
{
	return IS_ERR(rcu_access_pointer(active->request));
}

static inline struct llist_node *barrier_to_ll(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return (struct llist_node *)&node->base.link;
}

static inline struct intel_engine_cs *
__barrier_to_engine(struct active_node *node)
{
	return (struct intel_engine_cs *)READ_ONCE(node->base.link.prev);
}

static inline struct intel_engine_cs *
barrier_to_engine(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return __barrier_to_engine(node);
}

static inline struct active_node *barrier_from_ll(struct llist_node *x)
{
	return container_of((struct list_head *)x,
			    struct active_node, base.link);
}
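
/*
 * Note on the casts above (descriptive comment added by the editor, not
 * from the original source): while a node acts as an idle barrier, its
 * request pointer holds ERR_PTR(-EAGAIN), its base.link list_head is
 * reused as an llist_node on engine->barrier_tasks, and base.link.prev
 * doubles as a back-pointer to the owning engine. A minimal sketch of the
 * aliasing, assuming the standard layouts of the two list types:
 *
 *	struct list_head  { struct list_head *next, *prev; };
 *	struct llist_node { struct llist_node *next; };
 *
 *	barrier_to_ll(node)       == (struct llist_node *)&node->base.link
 *	                             (reuses link.next as llist_node.next)
 *	__barrier_to_engine(node) == (struct intel_engine_cs *)node->base.link.prev
 */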

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)

static void *active_debug_hint(void *addr)
{
	struct i915_active *ref = addr;

	return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
}

static struct debug_obj_descr active_debug_desc = {
	.name = "i915_active",
	.debug_hint = active_debug_hint,
};

static void debug_active_init(struct i915_active *ref)
{
	debug_object_init(ref, &active_debug_desc);
}

static void debug_active_activate(struct i915_active *ref)
{
	debug_object_activate(ref, &active_debug_desc);
}

static void debug_active_deactivate(struct i915_active *ref)
{
	debug_object_deactivate(ref, &active_debug_desc);
}

static void debug_active_fini(struct i915_active *ref)
{
	debug_object_free(ref, &active_debug_desc);
}

static void debug_active_assert(struct i915_active *ref)
{
	debug_object_assert_init(ref, &active_debug_desc);
}

#else

static inline void debug_active_init(struct i915_active *ref) { }
static inline void debug_active_activate(struct i915_active *ref) { }
static inline void debug_active_deactivate(struct i915_active *ref) { }
static inline void debug_active_fini(struct i915_active *ref) { }
static inline void debug_active_assert(struct i915_active *ref) { }

#endif

static void
__active_retire(struct i915_active *ref)
{
	struct active_node *it, *n;
	struct rb_root root;
	bool retire = false;

	lockdep_assert_held(&ref->mutex);

	/* return the unused nodes to our slabcache -- flushing the allocator */
	if (atomic_dec_and_test(&ref->count)) {
		debug_active_deactivate(ref);
		root = ref->tree;
		ref->tree = RB_ROOT;
		ref->cache = NULL;
		retire = true;
	}
	mutex_unlock(&ref->mutex);
	if (!retire)
		return;

	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
		GEM_BUG_ON(i915_active_request_isset(&it->base));
		kmem_cache_free(global.slab_cache, it);
	}

	/* After the final retire, the entire struct may be freed */
	if (ref->retire)
		ref->retire(ref);
}

static void
active_retire(struct i915_active *ref)
{
	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	/* One active may be flushed from inside the acquire of another */
	mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
	__active_retire(ref);
}

static void
node_retire(struct i915_active_request *base, struct i915_request *rq)
{
	active_retire(node_from_active(base)->ref);
}

static struct i915_active_request *
active_instance(struct i915_active *ref, struct intel_timeline *tl)
{
	struct active_node *node, *prealloc;
	struct rb_node **p, *parent;
	u64 idx = tl->fence_context;

	/*
	 * We track the most recently used timeline to skip a rbtree search
	 * for the common case, under typical loads we never need the rbtree
	 * at all. We can reuse the last slot if it is empty, that is
	 * after the previous activity has been retired, or if it matches the
	 * new timeline.
	 */
	node = READ_ONCE(ref->cache);
	if (node && node->timeline == idx)
		return &node->base;

	/* Preallocate a replacement, just in case */
	prealloc = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
	if (!prealloc)
		return NULL;

	mutex_lock(&ref->mutex);
	GEM_BUG_ON(i915_active_is_idle(ref));

	parent = NULL;
	p = &ref->tree.rb_node;
	while (*p) {
		parent = *p;

		node = rb_entry(parent, struct active_node, node);
		if (node->timeline == idx) {
			kmem_cache_free(global.slab_cache, prealloc);
			goto out;
		}

		if (node->timeline < idx)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	node = prealloc;
	i915_active_request_init(&node->base, &tl->mutex, NULL, node_retire);
	node->ref = ref;
	node->timeline = idx;

	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, &ref->tree);

out:
	ref->cache = node;
	mutex_unlock(&ref->mutex);

	BUILD_BUG_ON(offsetof(typeof(*node), base));
	return &node->base;
}

void __i915_active_init(struct drm_i915_private *i915,
			struct i915_active *ref,
			int (*active)(struct i915_active *ref),
			void (*retire)(struct i915_active *ref),
			struct lock_class_key *key)
{
	debug_active_init(ref);

	ref->i915 = i915;
	ref->flags = 0;
	ref->active = active;
	ref->retire = retire;
	ref->tree = RB_ROOT;
	ref->cache = NULL;
	init_llist_head(&ref->preallocated_barriers);
	atomic_set(&ref->count, 0);
	__mutex_init(&ref->mutex, "i915_active", key);
}
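
/*
 * Example usage (an illustrative sketch added by the editor, not part of
 * the original file). It assumes the i915_active_init() convenience macro
 * from i915_active.h and a hypothetical tracker "foo" with foo_active()/
 * foo_retire() callbacks:
 *
 *	struct foo {
 *		struct i915_active active;
 *	};
 *
 *	i915_active_init(i915, &foo->active, foo_active, foo_retire);
 *
 *	(while holding rq->timeline->mutex, track the request)
 *	err = i915_active_ref(&foo->active, rq->timeline, rq);
 *
 *	(later, flush all tracked requests)
 *	err = i915_active_wait(&foo->active);
 */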

static bool ____active_del_barrier(struct i915_active *ref,
				   struct active_node *node,
				   struct intel_engine_cs *engine)
{
	struct llist_node *head = NULL, *tail = NULL;
	struct llist_node *pos, *next;

	GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);

	/*
	 * Rebuild the llist excluding our node. We may perform this
	 * outside of the kernel_context timeline mutex and so someone
	 * else may be manipulating the engine->barrier_tasks, in
	 * which case either we or they will be upset :)
	 *
	 * A second __active_del_barrier() will report failure to claim
	 * the active_node and the caller will just shrug and know not to
	 * claim ownership of its node.
	 *
	 * A concurrent i915_request_add_active_barriers() will miss adding
	 * any of the tasks, but we will try again on the next -- and since
	 * we are actively using the barrier, we know that there will be
	 * at least another opportunity when we idle.
	 */
	llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) {
		if (node == barrier_from_ll(pos)) {
			node = NULL;
			continue;
		}

		pos->next = head;
		head = pos;
		if (!tail)
			tail = pos;
	}
	if (head)
		llist_add_batch(head, tail, &engine->barrier_tasks);

	return !node;
}

static bool
__active_del_barrier(struct i915_active *ref, struct active_node *node)
{
	return ____active_del_barrier(ref, node, barrier_to_engine(node));
}

int i915_active_ref(struct i915_active *ref,
		    struct intel_timeline *tl,
		    struct i915_request *rq)
{
	struct i915_active_request *active;
	int err;

	lockdep_assert_held(&tl->mutex);

	/* Prevent reaping in case we malloc/wait while building the tree */
	err = i915_active_acquire(ref);
	if (err)
		return err;

	active = active_instance(ref, tl);
	if (!active) {
		err = -ENOMEM;
		goto out;
	}

	if (is_barrier(active)) { /* proto-node used by our idle barrier */
		/*
		 * This request is on the kernel_context timeline, and so
		 * we can use it to substitute for the pending idle-barrier
		 * request that we want to emit on the kernel_context.
		 */
		__active_del_barrier(ref, node_from_active(active));
		RCU_INIT_POINTER(active->request, NULL);
		INIT_LIST_HEAD(&active->link);
	} else {
		if (!i915_active_request_isset(active))
			atomic_inc(&ref->count);
	}
	GEM_BUG_ON(!atomic_read(&ref->count));
	__i915_active_request_set(active, rq);

out:
	i915_active_release(ref);
	return err;
}

int i915_active_acquire(struct i915_active *ref)
{
	int err;

	debug_active_assert(ref);
	if (atomic_add_unless(&ref->count, 1, 0))
		return 0;

	err = mutex_lock_interruptible(&ref->mutex);
	if (err)
		return err;

	if (!atomic_read(&ref->count) && ref->active)
		err = ref->active(ref);
	if (!err) {
		debug_active_activate(ref);
		atomic_inc(&ref->count);
	}
	mutex_unlock(&ref->mutex);
	return err;
}

void i915_active_release(struct i915_active *ref)
{
	debug_active_assert(ref);
	active_retire(ref);
}
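
/*
 * Illustrative note (added by the editor, not from the original source):
 * acquire/release bracket a phase of activity. A typical pattern, sketched
 * with a hypothetical tracker "foo", keeps the i915_active alive (and its
 * nodes un-reaped) while new requests are being added to it:
 *
 *	err = i915_active_acquire(&foo->active);
 *	if (err)
 *		return err;
 *
 *	... allocate / wait / i915_active_ref() ...
 *
 *	i915_active_release(&foo->active);
 *
 * The final release (or retirement of the last tracked request) drops
 * ref->count to zero and triggers __active_retire().
 */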

static void __active_ungrab(struct i915_active *ref)
{
	clear_and_wake_up_bit(I915_ACTIVE_GRAB_BIT, &ref->flags);
}

bool i915_active_trygrab(struct i915_active *ref)
{
	debug_active_assert(ref);

	if (test_and_set_bit(I915_ACTIVE_GRAB_BIT, &ref->flags))
		return false;

	if (!atomic_add_unless(&ref->count, 1, 0)) {
		__active_ungrab(ref);
		return false;
	}

	return true;
}

void i915_active_ungrab(struct i915_active *ref)
{
	GEM_BUG_ON(!test_bit(I915_ACTIVE_GRAB_BIT, &ref->flags));

	active_retire(ref);
	__active_ungrab(ref);
}

int i915_active_wait(struct i915_active *ref)
{
	struct active_node *it, *n;
	int err;

	might_lock(&ref->mutex);

	if (i915_active_is_idle(ref))
		return 0;

	err = mutex_lock_interruptible(&ref->mutex);
	if (err)
		return err;

	if (!atomic_add_unless(&ref->count, 1, 0)) {
		mutex_unlock(&ref->mutex);
		return 0;
	}

	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		if (is_barrier(&it->base)) { /* unconnected idle-barrier */
			err = -EBUSY;
			break;
		}

		err = i915_active_request_retire(&it->base, BKL(ref));
		if (err)
			break;
	}

	__active_retire(ref);
	if (err)
		return err;

	if (wait_on_bit(&ref->flags, I915_ACTIVE_GRAB_BIT, TASK_KILLABLE))
		return -EINTR;

	if (!i915_active_is_idle(ref))
		return -EBUSY;

	return 0;
}

int i915_request_await_active_request(struct i915_request *rq,
				      struct i915_active_request *active)
{
	struct i915_request *barrier =
		i915_active_request_raw(active, &rq->i915->drm.struct_mutex);

	return barrier ? i915_request_await_dma_fence(rq, &barrier->fence) : 0;
}

int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
{
	struct active_node *it, *n;
	int err;

	if (RB_EMPTY_ROOT(&ref->tree))
		return 0;

	/* await allocates and so we need to avoid hitting the shrinker */
	err = i915_active_acquire(ref);
	if (err)
		return err;

	mutex_lock(&ref->mutex);
	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		err = i915_request_await_active_request(rq, &it->base);
		if (err)
			break;
	}
	mutex_unlock(&ref->mutex);

	i915_active_release(ref);
	return err;
}
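
/*
 * Illustrative note (added by the editor, not part of the original file):
 * a new request can be ordered after everything currently tracked in an
 * i915_active before it is allowed to execute, e.g. with a hypothetical
 * tracker "foo":
 *
 *	err = i915_request_await_active(rq, &foo->active);
 *	if (err)
 *		return err;
 *	i915_request_add(rq);
 *
 * rq will then not begin execution until every request recorded in
 * foo->active has signalled its fence.
 */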

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void i915_active_fini(struct i915_active *ref)
{
	debug_active_fini(ref);
	GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
	GEM_BUG_ON(atomic_read(&ref->count));
	mutex_destroy(&ref->mutex);
}
#endif

static inline bool is_idle_barrier(struct active_node *node, u64 idx)
{
	return node->timeline == idx && !i915_active_request_isset(&node->base);
}

static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
{
	struct rb_node *prev, *p;

	if (RB_EMPTY_ROOT(&ref->tree))
		return NULL;

	mutex_lock(&ref->mutex);
	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Try to reuse any existing barrier nodes already allocated for this
	 * i915_active, due to overlapping active phases there is likely a
	 * node kept alive (as we reuse before parking). We prefer to reuse
	 * completely idle barriers (less hassle in manipulating the llists),
	 * but otherwise any will do.
	 */
	if (ref->cache && is_idle_barrier(ref->cache, idx)) {
		p = &ref->cache->node;
		goto match;
	}

	prev = NULL;
	p = ref->tree.rb_node;
	while (p) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);

		if (is_idle_barrier(node, idx))
			goto match;

		prev = p;
		if (node->timeline < idx)
			p = p->rb_right;
		else
			p = p->rb_left;
	}

	/*
	 * No quick match, but we did find the leftmost rb_node for the
	 * kernel_context. Walk the rb_tree in-order to see if there were
	 * any idle-barriers on this timeline that we missed, or just use
	 * the first pending barrier.
	 */
	for (p = prev; p; p = rb_next(p)) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);
		struct intel_engine_cs *engine;

		if (node->timeline > idx)
			break;

		if (node->timeline < idx)
			continue;

		if (is_idle_barrier(node, idx))
			goto match;

		/*
		 * The list of pending barriers is protected by the
		 * kernel_context timeline, which notably we do not hold
		 * here. i915_request_add_active_barriers() may consume
		 * the barrier before we claim it, so we have to check
		 * that we still own the barrier before using it.
		 */
		engine = __barrier_to_engine(node);
		smp_rmb(); /* serialise with add_active_barriers */
		if (is_barrier(&node->base) &&
		    ____active_del_barrier(ref, node, engine))
			goto match;
	}

	mutex_unlock(&ref->mutex);

	return NULL;

match:
	rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
	if (p == &ref->cache->node)
		ref->cache = NULL;
	mutex_unlock(&ref->mutex);

	return rb_entry(p, struct active_node, node);
}

int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
					    struct intel_engine_cs *engine)
{
	struct drm_i915_private *i915 = engine->i915;
	intel_engine_mask_t tmp, mask = engine->mask;
	struct llist_node *pos, *next;
	int err;

	GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));

	/*
	 * Preallocate a node for each physical engine supporting the target
	 * engine (remember virtual engines have more than one sibling).
	 * We can then use the preallocated nodes in
	 * i915_active_acquire_barrier().
	 */
	for_each_engine_masked(engine, i915, mask, tmp) {
		u64 idx = engine->kernel_context->timeline->fence_context;
		struct active_node *node;

		node = reuse_idle_barrier(ref, idx);
		if (!node) {
			node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
			if (!node) {
				err = -ENOMEM;
				goto unwind;
			}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
			node->base.lock =
				&engine->kernel_context->timeline->mutex;
#endif
			RCU_INIT_POINTER(node->base.request, NULL);
			node->base.retire = node_retire;
			node->timeline = idx;
			node->ref = ref;
		}

		if (!i915_active_request_isset(&node->base)) {
			/*
			 * Mark this as being *our* unconnected proto-node.
			 *
			 * Since this node is not in any list, and we have
			 * decoupled it from the rbtree, we can reuse the
			 * request to indicate this is an idle-barrier node
			 * and then we can use the rb_node and list pointers
			 * for our tracking of the pending barrier.
			 */
			RCU_INIT_POINTER(node->base.request, ERR_PTR(-EAGAIN));
			node->base.link.prev = (void *)engine;
			atomic_inc(&ref->count);
		}

		GEM_BUG_ON(barrier_to_engine(node) != engine);
		llist_add(barrier_to_ll(node), &ref->preallocated_barriers);
		intel_engine_pm_get(engine);
	}

	return 0;

unwind:
	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
		struct active_node *node = barrier_from_ll(pos);

		atomic_dec(&ref->count);
		intel_engine_pm_put(barrier_to_engine(node));

		kmem_cache_free(global.slab_cache, node);
	}
	return err;
}

void i915_active_acquire_barrier(struct i915_active *ref)
{
	struct llist_node *pos, *next;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Transfer the list of preallocated barriers into the
	 * i915_active rbtree, but only as proto-nodes. They will be
	 * populated by i915_request_add_active_barriers() to point to the
	 * request that will eventually release them.
	 */
	mutex_lock_nested(&ref->mutex, SINGLE_DEPTH_NESTING);
	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
		struct active_node *node = barrier_from_ll(pos);
		struct intel_engine_cs *engine = barrier_to_engine(node);
		struct rb_node **p, *parent;

		parent = NULL;
		p = &ref->tree.rb_node;
		while (*p) {
			struct active_node *it;

			parent = *p;

			it = rb_entry(parent, struct active_node, node);
			if (it->timeline < node->timeline)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&node->node, parent, p);
		rb_insert_color(&node->node, &ref->tree);

		llist_add(barrier_to_ll(node), &engine->barrier_tasks);
		intel_engine_pm_put(engine);
	}
	mutex_unlock(&ref->mutex);
}

void i915_request_add_active_barriers(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct llist_node *node, *next;

	GEM_BUG_ON(intel_engine_is_virtual(engine));
	GEM_BUG_ON(rq->timeline != engine->kernel_context->timeline);

	/*
	 * Attach the list of proto-fences to the in-flight request such
	 * that the parent i915_active will be released when this request
	 * is retired.
	 */
	llist_for_each_safe(node, next, llist_del_all(&engine->barrier_tasks)) {
		RCU_INIT_POINTER(barrier_from_ll(node)->base.request, rq);
		smp_wmb(); /* serialise with reuse_idle_barrier */
		list_add_tail((struct list_head *)node, &rq->active_list);
	}
}
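
/*
 * Illustrative summary of the barrier lifecycle (added by the editor, not
 * from the original source). The exact call sites live outside this file,
 * around engine parking, and are assumed here:
 *
 *	i915_active_acquire_preallocate_barrier(ref, engine);
 *		preallocate one proto-node per physical engine and stash
 *		them on ref->preallocated_barriers
 *	i915_active_acquire_barrier(ref);
 *		move the proto-nodes into ref->tree and onto
 *		engine->barrier_tasks
 *	i915_request_add_active_barriers(rq);
 *		bind the proto-nodes to the next kernel_context request,
 *		so retiring that request finally drops ref->count
 */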

int i915_active_request_set(struct i915_active_request *active,
			    struct i915_request *rq)
{
	int err;

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
	lockdep_assert_held(active->lock);
#endif

	/* Must maintain ordering wrt previous active requests */
	err = i915_request_await_active_request(rq, active);
	if (err)
		return err;

	__i915_active_request_set(active, rq);
	return 0;
}

void i915_active_retire_noop(struct i915_active_request *active,
			     struct i915_request *request)
{
	/* Space left intentionally blank */
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif

static void i915_global_active_shrink(void)
{
	kmem_cache_shrink(global.slab_cache);
}

static void i915_global_active_exit(void)
{
	kmem_cache_destroy(global.slab_cache);
}

static struct i915_global_active global = { {
	.shrink = i915_global_active_shrink,
	.exit = i915_global_active_exit,
} };

int __init i915_global_active_init(void)
{
	global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
	if (!global.slab_cache)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}