// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include "gem/i915_gem_context.h"
#include "gem/i915_gem_pm.h"

#include "i915_drv.h"
#include "i915_trace.h"

#include "intel_context.h"
#include "intel_engine.h"
#include "intel_engine_pm.h"
#include "intel_ring.h"

static struct kmem_cache *slab_ce;

static struct intel_context *intel_context_alloc(void)
{
        return kmem_cache_zalloc(slab_ce, GFP_KERNEL);
}

static void rcu_context_free(struct rcu_head *rcu)
{
        struct intel_context *ce = container_of(rcu, typeof(*ce), rcu);

        trace_intel_context_free(ce);
        kmem_cache_free(slab_ce, ce);
}

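/*
 * Freeing is deferred by an RCU grace period: ce->signal_link and
 * ce->signal_lock are used under rcu_read_lock() (see the note in
 * intel_context_init()), so the memory must not be recycled while such
 * a walk may still be in flight.
 */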
void intel_context_free(struct intel_context *ce)
{
        call_rcu(&ce->rcu, rcu_context_free);
}

struct intel_context *
intel_context_create(struct intel_engine_cs *engine)
{
        struct intel_context *ce;

        ce = intel_context_alloc();
        if (!ce)
                return ERR_PTR(-ENOMEM);

        intel_context_init(ce, engine);
        trace_intel_context_create(ce);
        return ce;
}

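/*
 * The backing state (e.g. the context image) is allocated lazily on
 * first pin; CONTEXT_ALLOC_BIT guards the slow ce->ops->alloc() path so
 * that it runs at most once per context.
 */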
int intel_context_alloc_state(struct intel_context *ce)
{
        int err = 0;

        if (mutex_lock_interruptible(&ce->pin_mutex))
                return -EINTR;

        if (!test_bit(CONTEXT_ALLOC_BIT, &ce->flags)) {
                if (intel_context_is_banned(ce)) {
                        err = -EIO;
                        goto unlock;
                }

                err = ce->ops->alloc(ce);
                if (unlikely(err))
                        goto unlock;

                set_bit(CONTEXT_ALLOC_BIT, &ce->flags);
        }

unlock:
        mutex_unlock(&ce->pin_mutex);
        return err;
}

static int intel_context_active_acquire(struct intel_context *ce)
{
        int err;

        __i915_active_acquire(&ce->active);

        if (intel_context_is_barrier(ce) || intel_engine_uses_guc(ce->engine) ||
            intel_context_is_parallel(ce))
                return 0;

        /* Preallocate tracking nodes */
        err = i915_active_acquire_preallocate_barrier(&ce->active,
                                                      ce->engine);
        if (err)
                i915_active_release(&ce->active);

        return err;
}

static void intel_context_active_release(struct intel_context *ce)
{
        /* Nodes preallocated in intel_context_active_acquire() */
        i915_active_acquire_barrier(&ce->active);
        i915_active_release(&ce->active);
}

static int __context_pin_state(struct i915_vma *vma, struct i915_gem_ww_ctx *ww)
{
        unsigned int bias = i915_ggtt_pin_bias(vma) | PIN_OFFSET_BIAS;
        int err;

        err = i915_ggtt_pin(vma, ww, 0, bias | PIN_HIGH);
        if (err)
                return err;

        err = i915_active_acquire(&vma->active);
        if (err)
                goto err_unpin;

        /*
         * And mark it as a globally pinned object to let the shrinker know
         * it cannot reclaim the object until we release it.
         */
        i915_vma_make_unshrinkable(vma);
        vma->obj->mm.dirty = true;

        return 0;

err_unpin:
        i915_vma_unpin(vma);
        return err;
}

static void __context_unpin_state(struct i915_vma *vma)
{
        i915_vma_make_shrinkable(vma);
        i915_active_release(&vma->active);
        __i915_vma_unpin(vma);
}

static int __ring_active(struct intel_ring *ring,
                         struct i915_gem_ww_ctx *ww)
{
        int err;

        err = intel_ring_pin(ring, ww);
        if (err)
                return err;

        err = i915_active_acquire(&ring->vma->active);
        if (err)
                goto err_pin;

        return 0;

err_pin:
        intel_ring_unpin(ring);
        return err;
}

static void __ring_retire(struct intel_ring *ring)
{
        i915_active_release(&ring->vma->active);
        intel_ring_unpin(ring);
}

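/*
 * Pin everything the context needs for submission (ring, timeline and,
 * if present, the state vma) under the caller's ww transaction; undone
 * in reverse order by intel_context_post_unpin().
 */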
static int intel_context_pre_pin(struct intel_context *ce,
                                 struct i915_gem_ww_ctx *ww)
{
        int err;

        CE_TRACE(ce, "active\n");

        err = __ring_active(ce->ring, ww);
        if (err)
                return err;

        err = intel_timeline_pin(ce->timeline, ww);
        if (err)
                goto err_ring;

        if (!ce->state)
                return 0;

        err = __context_pin_state(ce->state, ww);
        if (err)
                goto err_timeline;

        return 0;

err_timeline:
        intel_timeline_unpin(ce->timeline);
err_ring:
        __ring_retire(ce->ring);
        return err;
}

static void intel_context_post_unpin(struct intel_context *ce)
{
        if (ce->state)
                __context_unpin_state(ce->state);

        intel_timeline_unpin(ce->timeline);
        __ring_retire(ce->ring);
}

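/*
 * Pin a context for use on the GPU within an existing ww transaction.
 * All backing objects are locked up front, so a -EDEADLK from the ww
 * locking is returned to the caller, who must back off the transaction
 * and retry (as __intel_context_do_pin() below does). Only the first
 * pin pays the full cost; later pins merely bump ce->pin_count.
 */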
int __intel_context_do_pin_ww(struct intel_context *ce,
                              struct i915_gem_ww_ctx *ww)
{
        bool handoff = false;
        void *vaddr;
        int err = 0;

        if (unlikely(!test_bit(CONTEXT_ALLOC_BIT, &ce->flags))) {
                err = intel_context_alloc_state(ce);
                if (err)
                        return err;
        }

        /*
         * We always pin the context/ring/timeline here, to ensure a pin
         * refcount for __intel_context_active(), which prevents a lock
         * inversion of ce->pin_mutex vs dma_resv_lock().
         */

        err = i915_gem_object_lock(ce->timeline->hwsp_ggtt->obj, ww);
        if (!err)
                err = i915_gem_object_lock(ce->ring->vma->obj, ww);
        if (!err && ce->state)
                err = i915_gem_object_lock(ce->state->obj, ww);
        if (!err)
                err = intel_context_pre_pin(ce, ww);
        if (err)
                return err;

        err = ce->ops->pre_pin(ce, ww, &vaddr);
        if (err)
                goto err_ctx_unpin;

        err = i915_active_acquire(&ce->active);
        if (err)
                goto err_post_unpin;

        err = mutex_lock_interruptible(&ce->pin_mutex);
        if (err)
                goto err_release;

        intel_engine_pm_might_get(ce->engine);

        if (unlikely(intel_context_is_closed(ce))) {
                err = -ENOENT;
                goto err_unlock;
        }

        if (likely(!atomic_add_unless(&ce->pin_count, 1, 0))) {
                err = intel_context_active_acquire(ce);
                if (unlikely(err))
                        goto err_unlock;

                err = ce->ops->pin(ce, vaddr);
                if (err) {
                        intel_context_active_release(ce);
                        goto err_unlock;
                }

                CE_TRACE(ce, "pin ring:{start:%08x, head:%04x, tail:%04x}\n",
                         i915_ggtt_offset(ce->ring->vma),
                         ce->ring->head, ce->ring->tail);

                handoff = true;
                smp_mb__before_atomic(); /* flush pin before it is visible */
                atomic_inc(&ce->pin_count);
        }

        GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

        trace_intel_context_do_pin(ce);

err_unlock:
        mutex_unlock(&ce->pin_mutex);
err_release:
        i915_active_release(&ce->active);
err_post_unpin:
        if (!handoff)
                ce->ops->post_unpin(ce);
err_ctx_unpin:
        intel_context_post_unpin(ce);

        /*
         * Unlock the hwsp_ggtt object since it's shared.
         * In principle we can unlock all the global state locked above
         * since it's pinned and doesn't need fencing, and will
         * thus remain resident until it is explicitly unpinned.
         */
        i915_gem_ww_unlock_single(ce->timeline->hwsp_ggtt->obj);

        return err;
}

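/*
 * Convenience wrapper that runs the pin as its own ww transaction: on
 * -EDEADLK all locks are dropped via i915_gem_ww_ctx_backoff() and the
 * whole sequence is retried.
 */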
int __intel_context_do_pin(struct intel_context *ce)
{
        struct i915_gem_ww_ctx ww;
        int err;

        i915_gem_ww_ctx_init(&ww, true);
retry:
        err = __intel_context_do_pin_ww(ce, &ww);
        if (err == -EDEADLK) {
                err = i915_gem_ww_ctx_backoff(&ww);
                if (!err)
                        goto retry;
        }
        i915_gem_ww_ctx_fini(&ww);
        return err;
}

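/*
 * Drop @sub pins at once; only the final unpin releases the HW backing
 * and the active tracker.
 */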
void __intel_context_do_unpin(struct intel_context *ce, int sub)
{
        if (!atomic_sub_and_test(sub, &ce->pin_count))
                return;

        CE_TRACE(ce, "unpin\n");
        ce->ops->unpin(ce);
        ce->ops->post_unpin(ce);

        /*
         * Once released, we may asynchronously drop the active reference.
         * As that may be the only reference keeping the context alive,
         * take an extra reference now so that it is not freed before we
         * finish dereferencing it.
         */
        intel_context_get(ce);
        intel_context_active_release(ce);
        trace_intel_context_do_unpin(ce);
        intel_context_put(ce);
}

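/*
 * Callbacks for the ce->active tracker: __intel_context_active() takes
 * extra pins on resources already activated by intel_context_pre_pin(),
 * and __intel_context_retire() drops them again once the last request
 * using the context has been retired.
 */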
static void __intel_context_retire(struct i915_active *active)
{
        struct intel_context *ce = container_of(active, typeof(*ce), active);

        CE_TRACE(ce, "retire runtime: { total:%lluns, avg:%lluns }\n",
                 intel_context_get_total_runtime_ns(ce),
                 intel_context_get_avg_runtime_ns(ce));

        set_bit(CONTEXT_VALID_BIT, &ce->flags);
        intel_context_post_unpin(ce);
        intel_context_put(ce);
}

static int __intel_context_active(struct i915_active *active)
{
        struct intel_context *ce = container_of(active, typeof(*ce), active);

        intel_context_get(ce);

        /* everything should already be activated by intel_context_pre_pin() */
        GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->ring->vma->active));
        __intel_ring_pin(ce->ring);

        __intel_timeline_pin(ce->timeline);

        if (ce->state) {
                GEM_WARN_ON(!i915_active_acquire_if_busy(&ce->state->active));
                __i915_vma_pin(ce->state);
                i915_vma_make_unshrinkable(ce->state);
        }

        return 0;
}

static int
sw_fence_dummy_notify(struct i915_sw_fence *sf,
                      enum i915_sw_fence_notify state)
{
        return NOTIFY_DONE;
}

void
intel_context_init(struct intel_context *ce, struct intel_engine_cs *engine)
{
        GEM_BUG_ON(!engine->cops);
        GEM_BUG_ON(!engine->gt->vm);

        kref_init(&ce->ref);

        ce->engine = engine;
        ce->ops = engine->cops;
        ce->sseu = engine->sseu;
        ce->ring = NULL;
        ce->ring_size = SZ_4K;

        ewma_runtime_init(&ce->stats.runtime.avg);

        ce->vm = i915_vm_get(engine->gt->vm);

        /* NB ce->signal_link/lock is used under RCU */
        spin_lock_init(&ce->signal_lock);
        INIT_LIST_HEAD(&ce->signals);

        mutex_init(&ce->pin_mutex);

        spin_lock_init(&ce->guc_state.lock);
        INIT_LIST_HEAD(&ce->guc_state.fences);
        INIT_LIST_HEAD(&ce->guc_state.requests);

        ce->guc_id.id = GUC_INVALID_CONTEXT_ID;
        INIT_LIST_HEAD(&ce->guc_id.link);

        INIT_LIST_HEAD(&ce->destroyed_link);

        INIT_LIST_HEAD(&ce->parallel.child_list);

        /*
         * Initialize the fence as already complete; it is expected to be
         * complete unless there is a pending schedule disable outstanding.
         */
        i915_sw_fence_init(&ce->guc_state.blocked,
                           sw_fence_dummy_notify);
        i915_sw_fence_commit(&ce->guc_state.blocked);

        i915_active_init(&ce->active,
                         __intel_context_active, __intel_context_retire, 0);
}

void intel_context_fini(struct intel_context *ce)
{
        struct intel_context *child, *next;

        if (ce->timeline)
                intel_timeline_put(ce->timeline);
        i915_vm_put(ce->vm);

        /* Need to put the creation ref for the children */
        if (intel_context_is_parent(ce))
                for_each_child_safe(ce, child, next)
                        intel_context_put(child);

        mutex_destroy(&ce->pin_mutex);
        i915_active_fini(&ce->active);
        i915_sw_fence_fini(&ce->guc_state.blocked);
}

void i915_context_module_exit(void)
{
        kmem_cache_destroy(slab_ce);
}

int __init i915_context_module_init(void)
{
        slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
        if (!slab_ce)
                return -ENOMEM;

        return 0;
}

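/*
 * Entering the engine takes an engine-pm wakeref and marks the timeline
 * as active; exiting reverses both, so the calls must stay balanced.
 */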
void intel_context_enter_engine(struct intel_context *ce)
{
        intel_engine_pm_get(ce->engine);
        intel_timeline_enter(ce->timeline);
}

void intel_context_exit_engine(struct intel_context *ce)
{
        intel_timeline_exit(ce->timeline);
        intel_engine_pm_put(ce->engine);
}

int intel_context_prepare_remote_request(struct intel_context *ce,
                                         struct i915_request *rq)
{
        struct intel_timeline *tl = ce->timeline;
        int err;

        /* Only suitable for use in remotely modifying this context */
        GEM_BUG_ON(rq->context == ce);

        if (rcu_access_pointer(rq->timeline) != tl) { /* timeline sharing! */
                /* Queue this switch after current activity by this context. */
                err = i915_active_fence_set(&tl->last_request, rq);
                if (err)
                        return err;
        }

        /*
         * Guarantee that the context image and the timeline remain pinned
         * until the modifying request is retired, by setting the ce activity
         * tracker.
         *
         * But we only need to take one pin on its account; in other words,
         * transfer the pinned ce object to the tracked active request.
         */
        GEM_BUG_ON(i915_active_is_idle(&ce->active));
        return i915_active_add_request(&ce->active, rq);
}

struct i915_request *intel_context_create_request(struct intel_context *ce)
{
        struct i915_gem_ww_ctx ww;
        struct i915_request *rq;
        int err;

        i915_gem_ww_ctx_init(&ww, true);
retry:
        err = intel_context_pin_ww(ce, &ww);
        if (!err) {
                rq = i915_request_create(ce);
                intel_context_unpin(ce);
        } else if (err == -EDEADLK) {
                err = i915_gem_ww_ctx_backoff(&ww);
                if (!err)
                        goto retry;
                rq = ERR_PTR(err);
        } else {
                rq = ERR_PTR(err);
        }

        i915_gem_ww_ctx_fini(&ww);

        if (IS_ERR(rq))
                return rq;

        /*
         * timeline->mutex should be the inner lock, but is used as the outer
         * lock here. Hack around this to shut up lockdep in selftests.
         */
        lockdep_unpin_lock(&ce->timeline->mutex, rq->cookie);
        mutex_release(&ce->timeline->mutex.dep_map, _RET_IP_);
        mutex_acquire(&ce->timeline->mutex.dep_map, SINGLE_DEPTH_NESTING, 0, _RET_IP_);
        rq->cookie = lockdep_pin_lock(&ce->timeline->mutex);

        return rq;
}

struct i915_request *intel_context_get_active_request(struct intel_context *ce)
{
        struct intel_context *parent = intel_context_to_parent(ce);
        struct i915_request *rq, *active = NULL;
        unsigned long flags;

        GEM_BUG_ON(!intel_engine_uses_guc(ce->engine));

        /*
         * We search the parent's list to find an active request on the
         * submitted context. The parent list contains the requests for all
         * the contexts in the relationship, so we have to compare each
         * request's context.
         */
        spin_lock_irqsave(&parent->guc_state.lock, flags);
        list_for_each_entry_reverse(rq, &parent->guc_state.requests,
                                    sched.link) {
                if (rq->context != ce)
                        continue;
                if (i915_request_completed(rq))
                        break;

                active = rq;
        }
        if (active)
                active = i915_request_get_rcu(active);
        spin_unlock_irqrestore(&parent->guc_state.lock, flags);

        return active;
}

void intel_context_bind_parent_child(struct intel_context *parent,
                                     struct intel_context *child)
{
        /*
         * It is the caller's responsibility to validate that this function
         * is used correctly, but we use GEM_BUG_ON here to ensure it is.
         */
        GEM_BUG_ON(intel_context_is_pinned(parent));
        GEM_BUG_ON(intel_context_is_child(parent));
        GEM_BUG_ON(intel_context_is_pinned(child));
        GEM_BUG_ON(intel_context_is_child(child));
        GEM_BUG_ON(intel_context_is_parent(child));

        parent->parallel.child_index = parent->parallel.number_children++;
        list_add_tail(&child->parallel.child_link,
                      &parent->parallel.child_list);
        child->parallel.parent = parent;
}

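/*
 * Total runtime is the saved accumulated runtime plus any still-active
 * period; when the backend reports cycles (COPS_RUNTIME_CYCLES) rather
 * than nanoseconds, the value is scaled by the GT clock period.
 */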
u64 intel_context_get_total_runtime_ns(const struct intel_context *ce)
{
        u64 total, active;

        total = ce->stats.runtime.total;
        if (ce->ops->flags & COPS_RUNTIME_CYCLES)
                total *= ce->engine->gt->clock_period_ns;

        active = READ_ONCE(ce->stats.active);
        if (active)
                active = intel_context_clock() - active;

        return total + active;
}

u64 intel_context_get_avg_runtime_ns(struct intel_context *ce)
{
        u64 avg = ewma_runtime_read(&ce->stats.runtime.avg);

        if (ce->ops->flags & COPS_RUNTIME_CYCLES)
                avg *= ce->engine->gt->clock_period_ns;

        return avg;
}

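/*
 * Banning marks the context as banned and revokes it with the (short)
 * banned preempt timeout, whereas intel_context_revoke() only marks it
 * as exiting and uses the engine's normal preempt timeout.
 */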
bool intel_context_ban(struct intel_context *ce, struct i915_request *rq)
{
        bool ret = intel_context_set_banned(ce);

        trace_intel_context_ban(ce);

        if (ce->ops->revoke)
                ce->ops->revoke(ce, rq,
                                INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS);

        return ret;
}

bool intel_context_revoke(struct intel_context *ce)
{
        bool ret = intel_context_set_exiting(ce);

        if (ce->ops->revoke)
                ce->ops->revoke(ce, NULL, ce->engine->props.preempt_timeout_ms);

        return ret;
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftest_context.c"
#endif