/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/debugobjects.h>

#include "gt/intel_context.h"
#include "gt/intel_engine_pm.h"
#include "gt/intel_ring.h"

#include "i915_drv.h"
#include "i915_active.h"
#include "i915_globals.h"

/*
 * Active refs memory management
 *
 * To be more economical with memory, we reap all the i915_active trees as
 * they idle (when we know the active requests are inactive) and allocate the
 * nodes from a local slab cache to hopefully reduce the fragmentation.
 */
static struct i915_global_active {
	struct i915_global base;
	struct kmem_cache *slab_cache;
} global;

struct active_node {
	struct i915_active_fence base;
	struct i915_active *ref;
	struct rb_node node;
	u64 timeline;
};

static inline struct active_node *
node_from_active(struct i915_active_fence *active)
{
	return container_of(active, struct active_node, base);
}

#define take_preallocated_barriers(x) llist_del_all(&(x)->preallocated_barriers)

static inline bool is_barrier(const struct i915_active_fence *active)
{
	return IS_ERR(rcu_access_pointer(active->fence));
}

static inline struct llist_node *barrier_to_ll(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return (struct llist_node *)&node->base.cb.node;
}

static inline struct intel_engine_cs *
__barrier_to_engine(struct active_node *node)
{
	return (struct intel_engine_cs *)READ_ONCE(node->base.cb.node.prev);
}

static inline struct intel_engine_cs *
barrier_to_engine(struct active_node *node)
{
	GEM_BUG_ON(!is_barrier(&node->base));
	return __barrier_to_engine(node);
}

static inline struct active_node *barrier_from_ll(struct llist_node *x)
{
	return container_of((struct list_head *)x,
			    struct active_node, base.cb.node);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM) && IS_ENABLED(CONFIG_DEBUG_OBJECTS)

static void *active_debug_hint(void *addr)
{
	struct i915_active *ref = addr;

	return (void *)ref->active ?: (void *)ref->retire ?: (void *)ref;
}

static struct debug_obj_descr active_debug_desc = {
	.name = "i915_active",
	.debug_hint = active_debug_hint,
};

static void debug_active_init(struct i915_active *ref)
{
	debug_object_init(ref, &active_debug_desc);
}

static void debug_active_activate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* before the first inc */
		debug_object_activate(ref, &active_debug_desc);
}

static void debug_active_deactivate(struct i915_active *ref)
{
	lockdep_assert_held(&ref->tree_lock);
	if (!atomic_read(&ref->count)) /* after the last dec */
		debug_object_deactivate(ref, &active_debug_desc);
}

static void debug_active_fini(struct i915_active *ref)
{
	debug_object_free(ref, &active_debug_desc);
}

static void debug_active_assert(struct i915_active *ref)
{
	debug_object_assert_init(ref, &active_debug_desc);
}

#else

static inline void debug_active_init(struct i915_active *ref) { }
static inline void debug_active_activate(struct i915_active *ref) { }
static inline void debug_active_deactivate(struct i915_active *ref) { }
static inline void debug_active_fini(struct i915_active *ref) { }
static inline void debug_active_assert(struct i915_active *ref) { }

#endif

static void
__active_retire(struct i915_active *ref)
{
	struct active_node *it, *n;
	struct rb_root root;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* return the unused nodes to our slabcache -- flushing the allocator */
	if (!atomic_dec_and_lock_irqsave(&ref->count, &ref->tree_lock, flags))
		return;

	GEM_BUG_ON(rcu_access_pointer(ref->excl.fence));
	debug_active_deactivate(ref);

	root = ref->tree;
	ref->tree = RB_ROOT;
	ref->cache = NULL;

	spin_unlock_irqrestore(&ref->tree_lock, flags);

	/* After the final retire, the entire struct may be freed */
	if (ref->retire)
		ref->retire(ref);

	/* ... except if you wait on it, you must manage your own references! */
	wake_up_var(ref);

	rbtree_postorder_for_each_entry_safe(it, n, &root, node) {
		GEM_BUG_ON(i915_active_fence_isset(&it->base));
		kmem_cache_free(global.slab_cache, it);
	}
}

static void
active_work(struct work_struct *wrk)
{
	struct i915_active *ref = container_of(wrk, typeof(*ref), work);

	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	__active_retire(ref);
}

static void
active_retire(struct i915_active *ref)
{
	GEM_BUG_ON(!atomic_read(&ref->count));
	if (atomic_add_unless(&ref->count, -1, 1))
		return;

	if (ref->flags & I915_ACTIVE_RETIRE_SLEEPS) {
		queue_work(system_unbound_wq, &ref->work);
		return;
	}

	__active_retire(ref);
}

static inline struct dma_fence **
__active_fence_slot(struct i915_active_fence *active)
{
	return (struct dma_fence ** __force)&active->fence;
}

static inline bool
active_fence_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct i915_active_fence *active =
		container_of(cb, typeof(*active), cb);

	return cmpxchg(__active_fence_slot(active), fence, NULL) == fence;
}

static void
node_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct active_node, base.cb)->ref);
}

static void
excl_retire(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	if (active_fence_cb(fence, cb))
		active_retire(container_of(cb, struct i915_active, excl.cb));
}

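/*
 * Look up the i915_active_fence slot tracking @tl within @ref, allocating
 * and inserting a new rbtree node if this timeline has not been seen
 * before. The most recently used node is cached so the common
 * single-timeline case avoids the rbtree walk entirely.
 */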
static struct i915_active_fence *
active_instance(struct i915_active *ref, struct intel_timeline *tl)
{
	struct active_node *node, *prealloc;
	struct rb_node **p, *parent;
	u64 idx = tl->fence_context;

	/*
	 * We track the most recently used timeline to skip a rbtree search
	 * for the common case, under typical loads we never need the rbtree
	 * at all. We can reuse the last slot if it is empty, that is
	 * after the previous activity has been retired, or if it matches the
	 * current timeline.
	 */
	node = READ_ONCE(ref->cache);
	if (node && node->timeline == idx)
		return &node->base;

	/* Preallocate a replacement, just in case */
	prealloc = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
	if (!prealloc)
		return NULL;

	spin_lock_irq(&ref->tree_lock);
	GEM_BUG_ON(i915_active_is_idle(ref));

	parent = NULL;
	p = &ref->tree.rb_node;
	while (*p) {
		parent = *p;

		node = rb_entry(parent, struct active_node, node);
		if (node->timeline == idx) {
			kmem_cache_free(global.slab_cache, prealloc);
			goto out;
		}

		if (node->timeline < idx)
			p = &parent->rb_right;
		else
			p = &parent->rb_left;
	}

	node = prealloc;
	__i915_active_fence_init(&node->base, NULL, node_retire);
	node->ref = ref;
	node->timeline = idx;

	rb_link_node(&node->node, parent, p);
	rb_insert_color(&node->node, &ref->tree);

out:
	ref->cache = node;
	spin_unlock_irq(&ref->tree_lock);

	BUILD_BUG_ON(offsetof(typeof(*node), base));
	return &node->base;
}

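/*
 * __i915_active_init() takes its lock classes from the caller (normally the
 * i915_active_init() wrapper). The retire callback may be tagged with
 * I915_ACTIVE_MAY_SLEEP, packed into its low pointer bits, in which case the
 * final retirement is deferred to the worker instead of running inline from
 * the last i915_active_release().
 */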
void __i915_active_init(struct i915_active *ref,
			int (*active)(struct i915_active *ref),
			void (*retire)(struct i915_active *ref),
			struct lock_class_key *mkey,
			struct lock_class_key *wkey)
{
	unsigned long bits;

	debug_active_init(ref);

	ref->flags = 0;
	ref->active = active;
	ref->retire = ptr_unpack_bits(retire, &bits, 2);
	if (bits & I915_ACTIVE_MAY_SLEEP)
		ref->flags |= I915_ACTIVE_RETIRE_SLEEPS;

	spin_lock_init(&ref->tree_lock);
	ref->tree = RB_ROOT;
	ref->cache = NULL;

	init_llist_head(&ref->preallocated_barriers);
	atomic_set(&ref->count, 0);
	__mutex_init(&ref->mutex, "i915_active", mkey);
	__i915_active_fence_init(&ref->excl, NULL, excl_retire);
	INIT_WORK(&ref->work, active_work);
#if IS_ENABLED(CONFIG_LOCKDEP)
	lockdep_init_map(&ref->work.lockdep_map, "i915_active.work", wkey, 0);
#endif
}

static bool ____active_del_barrier(struct i915_active *ref,
				   struct active_node *node,
				   struct intel_engine_cs *engine)
{
	struct llist_node *head = NULL, *tail = NULL;
	struct llist_node *pos, *next;

	GEM_BUG_ON(node->timeline != engine->kernel_context->timeline->fence_context);

	/*
	 * Rebuild the llist excluding our node. We may perform this
	 * outside of the kernel_context timeline mutex and so someone
	 * else may be manipulating the engine->barrier_tasks, in
	 * which case either we or they will be upset :)
	 *
	 * A second __active_del_barrier() will report failure to claim
	 * the active_node and the caller will just shrug and know not to
	 * claim ownership of its node.
	 *
	 * A concurrent i915_request_add_active_barriers() will miss adding
	 * any of the tasks, but we will try again on the next -- and since
	 * we are actively using the barrier, we know that there will be
	 * at least another opportunity when we idle.
	 */
	llist_for_each_safe(pos, next, llist_del_all(&engine->barrier_tasks)) {
		if (node == barrier_from_ll(pos)) {
			node = NULL;
			continue;
		}

		pos->next = head;
		head = pos;
		if (!tail)
			tail = pos;
	}
	if (head)
		llist_add_batch(head, tail, &engine->barrier_tasks);

	return !node;
}

static bool
__active_del_barrier(struct i915_active *ref, struct active_node *node)
{
	return ____active_del_barrier(ref, node, barrier_to_engine(node));
}

int i915_active_ref(struct i915_active *ref,
		    struct intel_timeline *tl,
		    struct dma_fence *fence)
{
	struct i915_active_fence *active;
	int err;

	lockdep_assert_held(&tl->mutex);

	/* Prevent reaping in case we malloc/wait while building the tree */
	err = i915_active_acquire(ref);
	if (err)
		return err;

	active = active_instance(ref, tl);
	if (!active) {
		err = -ENOMEM;
		goto out;
	}

	if (is_barrier(active)) { /* proto-node used by our idle barrier */
		/*
		 * This request is on the kernel_context timeline, and so
		 * we can use it to substitute for the pending idle-barrier
		 * request that we want to emit on the kernel_context.
		 */
		__active_del_barrier(ref, node_from_active(active));
		RCU_INIT_POINTER(active->fence, NULL);
		atomic_dec(&ref->count);
	}
	if (!__i915_active_fence_set(active, fence))
		atomic_inc(&ref->count);

out:
	i915_active_release(ref);
	return err;
}

void i915_active_set_exclusive(struct i915_active *ref, struct dma_fence *f)
{
	/* We expect the caller to manage the exclusive timeline ordering */
	GEM_BUG_ON(i915_active_is_idle(ref));

	if (!__i915_active_fence_set(&ref->excl, f))
		atomic_inc(&ref->count);
}

bool i915_active_acquire_if_busy(struct i915_active *ref)
{
	debug_active_assert(ref);
	return atomic_add_unless(&ref->count, 1, 0);
}

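/*
 * i915_active_acquire() takes a reference on the i915_active for as long as
 * the caller is tracking activity, invoking the optional ->active() hook on
 * the first acquisition. It must be paired with i915_active_release(), which
 * drops the reference and retires the tracker once the last reference and
 * the last active fence are gone.
 */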
int i915_active_acquire(struct i915_active *ref)
{
	int err;

	if (i915_active_acquire_if_busy(ref))
		return 0;

	err = mutex_lock_interruptible(&ref->mutex);
	if (err)
		return err;

	if (likely(!i915_active_acquire_if_busy(ref))) {
		if (ref->active)
			err = ref->active(ref);
		if (!err) {
			spin_lock_irq(&ref->tree_lock); /* __active_retire() */
			debug_active_activate(ref);
			atomic_inc(&ref->count);
			spin_unlock_irq(&ref->tree_lock);
		}
	}

	mutex_unlock(&ref->mutex);

	return err;
}

void i915_active_release(struct i915_active *ref)
{
	debug_active_assert(ref);
	active_retire(ref);
}

static void enable_signaling(struct i915_active_fence *active)
{
	struct dma_fence *fence;

	fence = i915_active_fence_get(active);
	if (!fence)
		return;

	dma_fence_enable_sw_signaling(fence);
	dma_fence_put(fence);
}

int i915_active_wait(struct i915_active *ref)
{
	struct active_node *it, *n;
	int err = 0;

	might_sleep();

	if (!i915_active_acquire_if_busy(ref))
		return 0;

	/* Flush lazy signals */
	enable_signaling(&ref->excl);
	rbtree_postorder_for_each_entry_safe(it, n, &ref->tree, node) {
		if (is_barrier(&it->base)) /* unconnected idle barrier */
			continue;

		enable_signaling(&it->base);
	}
	/* Any fence added after the wait begins will not be auto-signaled */

	i915_active_release(ref);
	if (err)
		return err;

	if (wait_var_event_interruptible(ref, i915_active_is_idle(ref)))
		return -EINTR;

	flush_work(&ref->work);
	return 0;
}

int i915_request_await_active(struct i915_request *rq, struct i915_active *ref)
{
	int err = 0;

	if (rcu_access_pointer(ref->excl.fence)) {
		struct dma_fence *fence;

		rcu_read_lock();
		fence = dma_fence_get_rcu_safe(&ref->excl.fence);
		rcu_read_unlock();
		if (fence) {
			err = i915_request_await_dma_fence(rq, fence);
			dma_fence_put(fence);
		}
	}

	/* In the future we may choose to await on all fences */

	return err;
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)
void i915_active_fini(struct i915_active *ref)
{
	debug_active_fini(ref);
	GEM_BUG_ON(atomic_read(&ref->count));
	GEM_BUG_ON(work_pending(&ref->work));
	GEM_BUG_ON(!RB_EMPTY_ROOT(&ref->tree));
	mutex_destroy(&ref->mutex);
}
#endif

static inline bool is_idle_barrier(struct active_node *node, u64 idx)
{
	return node->timeline == idx && !i915_active_fence_isset(&node->base);
}

static struct active_node *reuse_idle_barrier(struct i915_active *ref, u64 idx)
{
	struct rb_node *prev, *p;

	if (RB_EMPTY_ROOT(&ref->tree))
		return NULL;

	spin_lock_irq(&ref->tree_lock);
	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Try to reuse any existing barrier nodes already allocated for this
	 * i915_active, due to overlapping active phases there is likely a
	 * node kept alive (as we reuse before parking). We prefer to reuse
	 * completely idle barriers (less hassle in manipulating the llists),
	 * but otherwise any will do.
	 */
	if (ref->cache && is_idle_barrier(ref->cache, idx)) {
		p = &ref->cache->node;
		goto match;
	}

	prev = NULL;
	p = ref->tree.rb_node;
	while (p) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);

		if (is_idle_barrier(node, idx))
			goto match;

		prev = p;
		if (node->timeline < idx)
			p = p->rb_right;
		else
			p = p->rb_left;
	}

	/*
	 * No quick match, but we did find the leftmost rb_node for the
	 * kernel_context. Walk the rb_tree in-order to see if there were
	 * any idle-barriers on this timeline that we missed, or just use
	 * the first pending barrier.
	 */
	for (p = prev; p; p = rb_next(p)) {
		struct active_node *node =
			rb_entry(p, struct active_node, node);
		struct intel_engine_cs *engine;

		if (node->timeline > idx)
			break;

		if (node->timeline < idx)
			continue;

		if (is_idle_barrier(node, idx))
			goto match;

		/*
		 * The list of pending barriers is protected by the
		 * kernel_context timeline, which notably we do not hold
		 * here. i915_request_add_active_barriers() may consume
		 * the barrier before we claim it, so we have to check
		 * for success.
		 */
		engine = __barrier_to_engine(node);
		smp_rmb(); /* serialise with add_active_barriers */
		if (is_barrier(&node->base) &&
		    ____active_del_barrier(ref, node, engine))
			goto match;
	}

	spin_unlock_irq(&ref->tree_lock);

	return NULL;

match:
	rb_erase(p, &ref->tree); /* Hide from waits and sibling allocations */
	if (p == &ref->cache->node)
		ref->cache = NULL;
	spin_unlock_irq(&ref->tree_lock);

	return rb_entry(p, struct active_node, node);
}

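/*
 * Preallocate one idle-barrier node per physical engine backing @engine
 * (reusing idle barriers already in the tree where possible), so that a
 * subsequent i915_active_acquire_barrier() can insert them into the rbtree
 * without having to allocate.
 */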
int i915_active_acquire_preallocate_barrier(struct i915_active *ref,
					    struct intel_engine_cs *engine)
{
	intel_engine_mask_t tmp, mask = engine->mask;
	struct llist_node *first = NULL, *last = NULL;
	struct intel_gt *gt = engine->gt;
	int err;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/* Wait until the previous preallocation is completed */
	while (!llist_empty(&ref->preallocated_barriers))
		cond_resched();

	/*
	 * Preallocate a node for each physical engine supporting the target
	 * engine (remember virtual engines have more than one sibling).
	 * We can then use the preallocated nodes in
	 * i915_active_acquire_barrier()
	 */
	for_each_engine_masked(engine, gt, mask, tmp) {
		u64 idx = engine->kernel_context->timeline->fence_context;
		struct llist_node *prev = first;
		struct active_node *node;

		node = reuse_idle_barrier(ref, idx);
		if (!node) {
			node = kmem_cache_alloc(global.slab_cache, GFP_KERNEL);
			if (!node) {
				err = -ENOMEM;
				goto unwind;
			}

			RCU_INIT_POINTER(node->base.fence, NULL);
			node->base.cb.func = node_retire;
			node->timeline = idx;
			node->ref = ref;
		}

		if (!i915_active_fence_isset(&node->base)) {
			/*
			 * Mark this as being *our* unconnected proto-node.
			 *
			 * Since this node is not in any list, and we have
			 * decoupled it from the rbtree, we can reuse the
			 * request to indicate this is an idle-barrier node
			 * and then we can use the rb_node and list pointers
			 * for our tracking of the pending barrier.
			 */
			RCU_INIT_POINTER(node->base.fence, ERR_PTR(-EAGAIN));
			node->base.cb.node.prev = (void *)engine;
			atomic_inc(&ref->count);
		}
		GEM_BUG_ON(rcu_access_pointer(node->base.fence) != ERR_PTR(-EAGAIN));

		GEM_BUG_ON(barrier_to_engine(node) != engine);
		first = barrier_to_ll(node);
		first->next = prev;
		if (!last)
			last = first;
		intel_engine_pm_get(engine);
	}

	GEM_BUG_ON(!llist_empty(&ref->preallocated_barriers));
	llist_add_batch(first, last, &ref->preallocated_barriers);

	return 0;

unwind:
	while (first) {
		struct active_node *node = barrier_from_ll(first);

		first = first->next;

		atomic_dec(&ref->count);
		intel_engine_pm_put(barrier_to_engine(node));

		kmem_cache_free(global.slab_cache, node);
	}
	return err;
}

void i915_active_acquire_barrier(struct i915_active *ref)
{
	struct llist_node *pos, *next;
	unsigned long flags;

	GEM_BUG_ON(i915_active_is_idle(ref));

	/*
	 * Transfer the list of preallocated barriers into the
	 * i915_active rbtree, but only as proto-nodes. They will be
	 * populated by i915_request_add_active_barriers() to point to the
	 * request that will eventually release them.
	 */
	llist_for_each_safe(pos, next, take_preallocated_barriers(ref)) {
		struct active_node *node = barrier_from_ll(pos);
		struct intel_engine_cs *engine = barrier_to_engine(node);
		struct rb_node **p, *parent;

		spin_lock_irqsave_nested(&ref->tree_lock, flags,
					 SINGLE_DEPTH_NESTING);
		parent = NULL;
		p = &ref->tree.rb_node;
		while (*p) {
			struct active_node *it;

			parent = *p;

			it = rb_entry(parent, struct active_node, node);
			if (it->timeline < node->timeline)
				p = &parent->rb_right;
			else
				p = &parent->rb_left;
		}
		rb_link_node(&node->node, parent, p);
		rb_insert_color(&node->node, &ref->tree);
		spin_unlock_irqrestore(&ref->tree_lock, flags);

		GEM_BUG_ON(!intel_engine_pm_is_awake(engine));
		llist_add(barrier_to_ll(node), &engine->barrier_tasks);
		intel_engine_pm_put(engine);
	}
}

static struct dma_fence **ll_to_fence_slot(struct llist_node *node)
{
	return __active_fence_slot(&barrier_from_ll(node)->base);
}

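/*
 * Called when emitting an idle barrier on the kernel_context: splice the
 * engine's pending barrier nodes onto @rq's callback list so that each
 * parent i915_active is released when this request is retired.
 */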
void i915_request_add_active_barriers(struct i915_request *rq)
{
	struct intel_engine_cs *engine = rq->engine;
	struct llist_node *node, *next;
	unsigned long flags;

	GEM_BUG_ON(!intel_context_is_barrier(rq->context));
	GEM_BUG_ON(intel_engine_is_virtual(engine));
	GEM_BUG_ON(i915_request_timeline(rq) != engine->kernel_context->timeline);

	node = llist_del_all(&engine->barrier_tasks);
	if (!node)
		return;
	/*
	 * Attach the list of proto-fences to the in-flight request such
	 * that the parent i915_active will be released when this request
	 * is retired.
	 */
	spin_lock_irqsave(&rq->lock, flags);
	llist_for_each_safe(node, next, node) {
		/* serialise with reuse_idle_barrier */
		smp_store_mb(*ll_to_fence_slot(node), &rq->fence);
		list_add_tail((struct list_head *)node, &rq->fence.cb_list);
	}
	spin_unlock_irqrestore(&rq->lock, flags);
}

/*
 * __i915_active_fence_set: Update the last active fence along its timeline
 * @active: the active tracker
 * @fence: the new fence (under construction)
 *
 * Records the new @fence as the last active fence along its timeline in
 * this active tracker, moving the tracking callbacks from the previous
 * fence onto this one. Returns the previous fence (if not already completed),
 * which the caller must ensure is executed before the new fence. To ensure
 * that the order of fences within the timeline of the i915_active_fence is
 * understood, it should be locked by the caller.
 */
struct dma_fence *
__i915_active_fence_set(struct i915_active_fence *active,
			struct dma_fence *fence)
{
	struct dma_fence *prev;
	unsigned long flags;

	if (fence == rcu_access_pointer(active->fence))
		return fence;

	GEM_BUG_ON(test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags));

	/*
	 * Consider that we have two threads arriving (A and B), with
	 * C already resident as the active->fence.
	 *
	 * A does the xchg first, and so it sees C or NULL depending
	 * on the timing of the interrupt handler. If it is NULL, the
	 * previous fence must have been signaled and we know that
	 * we are first on the timeline. If it is still present,
	 * we acquire the lock on that fence and serialise with the interrupt
	 * handler, in the process removing it from any future interrupt
	 * callback. A will then wait on C before executing (if present).
	 *
	 * As B is second, it sees A as the previous fence and so waits for
	 * it to complete its transition and takes over the occupancy for
	 * itself -- remembering that it needs to wait on A before executing.
	 *
	 * Note the strong ordering of the timeline also provides consistent
	 * nesting rules for the fence->lock; the inner lock is always the
	 * older lock.
	 */
	spin_lock_irqsave(fence->lock, flags);
	prev = xchg(__active_fence_slot(active), fence);
	if (prev) {
		GEM_BUG_ON(prev == fence);
		spin_lock_nested(prev->lock, SINGLE_DEPTH_NESTING);
		__list_del_entry(&active->cb.node);
		spin_unlock(prev->lock); /* serialise with prev->cb_list */
	}
	GEM_BUG_ON(rcu_access_pointer(active->fence) != fence);
	list_add_tail(&active->cb.node, &fence->cb_list);
	spin_unlock_irqrestore(fence->lock, flags);

	return prev;
}

int i915_active_fence_set(struct i915_active_fence *active,
			  struct i915_request *rq)
{
	struct dma_fence *fence;
	int err = 0;

	/* Must maintain timeline ordering wrt previous active requests */
	rcu_read_lock();
	fence = __i915_active_fence_set(active, &rq->fence);
	if (fence) /* but the previous fence may not belong to that timeline! */
		fence = dma_fence_get_rcu(fence);
	rcu_read_unlock();
	if (fence) {
		err = i915_request_await_dma_fence(rq, fence);
		dma_fence_put(fence);
	}

	return err;
}

void i915_active_noop(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	active_fence_cb(fence, cb);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/i915_active.c"
#endif

static void i915_global_active_shrink(void)
{
	kmem_cache_shrink(global.slab_cache);
}

static void i915_global_active_exit(void)
{
	kmem_cache_destroy(global.slab_cache);
}

static struct i915_global_active global = { {
	.shrink = i915_global_active_shrink,
	.exit = i915_global_active_exit,
} };

int __init i915_global_active_init(void)
{
	global.slab_cache = KMEM_CACHE(active_node, SLAB_HWCACHE_ALIGN);
	if (!global.slab_cache)
		return -ENOMEM;

	i915_global_register(&global.base);
	return 0;
}