Commit | Line | Data |
---|---|---|
05235c53 CW |
1 | /* |
2 | * Copyright © 2008-2015 Intel Corporation | |
3 | * | |
4 | * Permission is hereby granted, free of charge, to any person obtaining a | |
5 | * copy of this software and associated documentation files (the "Software"), | |
6 | * to deal in the Software without restriction, including without limitation | |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, | |
8 | * and/or sell copies of the Software, and to permit persons to whom the | |
9 | * Software is furnished to do so, subject to the following conditions: | |
10 | * | |
11 | * The above copyright notice and this permission notice (including the next | |
12 | * paragraph) shall be included in all copies or substantial portions of the | |
13 | * Software. | |
14 | * | |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL | |
18 | * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER | |
19 | * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING | |
20 | * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS | |
21 | * IN THE SOFTWARE. | |
22 | * | |
23 | */ | |
24 | ||
fa545cbf | 25 | #include <linux/prefetch.h> |
b52992c0 | 26 | #include <linux/dma-fence-array.h> |
e6017571 IM |
27 | #include <linux/sched.h> |
28 | #include <linux/sched/clock.h> | |
f361bf4a | 29 | #include <linux/sched/signal.h> |
fa545cbf | 30 | |
05235c53 CW |
31 | #include "i915_drv.h" |
32 | ||
f54d1867 | 33 | static const char *i915_fence_get_driver_name(struct dma_fence *fence) |
04769652 CW |
34 | { |
35 | return "i915"; | |
36 | } | |
37 | ||
f54d1867 | 38 | static const char *i915_fence_get_timeline_name(struct dma_fence *fence) |
04769652 | 39 | { |
e61e0f51 CW |
40 | /* |
41 | * The timeline struct (as part of the ppgtt underneath a context) | |
05506b5b CW |
42 | * may be freed when the request is no longer in use by the GPU. |
43 | * We could extend the life of a context to beyond that of all | |
44 | * fences, possibly keeping the hw resource around indefinitely, | |
45 | * or we can just give them a false name. Since | |
46 | * dma_fence_ops.get_timeline_name is a debug feature, the occasional | |
47 | * lie seems justifiable. | |
48 | */ | |
49 | if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) | |
50 | return "signaled"; | |
51 | ||
73cb9701 | 52 | return to_request(fence)->timeline->common->name; |
04769652 CW |
53 | } |
54 | ||
f54d1867 | 55 | static bool i915_fence_signaled(struct dma_fence *fence) |
04769652 | 56 | { |
e61e0f51 | 57 | return i915_request_completed(to_request(fence)); |
04769652 CW |
58 | } |
59 | ||
f54d1867 | 60 | static bool i915_fence_enable_signaling(struct dma_fence *fence) |
04769652 | 61 | { |
6f9ec414 | 62 | return intel_engine_enable_signaling(to_request(fence), true); |
04769652 CW |
63 | } |
64 | ||
f54d1867 | 65 | static signed long i915_fence_wait(struct dma_fence *fence, |
04769652 | 66 | bool interruptible, |
e95433c7 | 67 | signed long timeout) |
04769652 | 68 | { |
e61e0f51 | 69 | return i915_request_wait(to_request(fence), interruptible, timeout); |
04769652 CW |
70 | } |
71 | ||
f54d1867 | 72 | static void i915_fence_release(struct dma_fence *fence) |
04769652 | 73 | { |
e61e0f51 | 74 | struct i915_request *rq = to_request(fence); |
04769652 | 75 | |
e61e0f51 CW |
76 | /* |
77 | * The request is put onto an RCU freelist (i.e. the address | |
fc158405 CW |
78 | * is immediately reused), so mark the fences as being freed now. | |
79 | * Otherwise the debugobjects for the fences are only marked as | |
80 | * freed when the slab cache itself is freed, and so we would get | |
81 | * caught trying to reuse dead objects. | |
82 | */ | |
e61e0f51 | 83 | i915_sw_fence_fini(&rq->submit); |
fc158405 | 84 | |
e61e0f51 | 85 | kmem_cache_free(rq->i915->requests, rq); |
04769652 CW |
86 | } |
87 | ||
f54d1867 | 88 | const struct dma_fence_ops i915_fence_ops = { |
04769652 CW |
89 | .get_driver_name = i915_fence_get_driver_name, |
90 | .get_timeline_name = i915_fence_get_timeline_name, | |
91 | .enable_signaling = i915_fence_enable_signaling, | |
92 | .signaled = i915_fence_signaled, | |
93 | .wait = i915_fence_wait, | |
94 | .release = i915_fence_release, | |
04769652 CW |
95 | }; |
96 | ||
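The ops table above is not called directly by the driver; the dma-fence core dispatches into it through the generic API. A minimal sketch of how an external fence user reaches these callbacks is shown below — example_wait_on_request_fence() is a hypothetical helper, while dma_fence_is_signaled() and dma_fence_wait_timeout() are the standard &lt;linux/dma-fence.h&gt; entry points that route through i915_fence_ops.

```c
#include <linux/dma-fence.h>

/* Sketch only: @rq is some i915_request we already hold a reference to. */
static long example_wait_on_request_fence(struct i915_request *rq)
{
	struct dma_fence *fence = &rq->fence;

	/* Dispatches to i915_fence_signaled() via i915_fence_ops.signaled */
	if (dma_fence_is_signaled(fence))
		return 0;

	/* Dispatches to i915_fence_wait() via i915_fence_ops.wait */
	return dma_fence_wait_timeout(fence, true, HZ);
}
```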
05235c53 | 97 | static inline void |
e61e0f51 | 98 | i915_request_remove_from_client(struct i915_request *request) |
05235c53 | 99 | { |
c8659efa | 100 | struct drm_i915_file_private *file_priv; |
05235c53 | 101 | |
c8659efa | 102 | file_priv = request->file_priv; |
05235c53 CW |
103 | if (!file_priv) |
104 | return; | |
105 | ||
106 | spin_lock(&file_priv->mm.lock); | |
c8659efa CW |
107 | if (request->file_priv) { |
108 | list_del(&request->client_link); | |
109 | request->file_priv = NULL; | |
110 | } | |
05235c53 | 111 | spin_unlock(&file_priv->mm.lock); |
05235c53 CW |
112 | } |
113 | ||
52e54209 CW |
114 | static struct i915_dependency * |
115 | i915_dependency_alloc(struct drm_i915_private *i915) | |
116 | { | |
117 | return kmem_cache_alloc(i915->dependencies, GFP_KERNEL); | |
118 | } | |
119 | ||
120 | static void | |
121 | i915_dependency_free(struct drm_i915_private *i915, | |
122 | struct i915_dependency *dep) | |
123 | { | |
124 | kmem_cache_free(i915->dependencies, dep); | |
125 | } | |
126 | ||
127 | static void | |
128 | __i915_priotree_add_dependency(struct i915_priotree *pt, | |
129 | struct i915_priotree *signal, | |
130 | struct i915_dependency *dep, | |
131 | unsigned long flags) | |
132 | { | |
20311bd3 | 133 | INIT_LIST_HEAD(&dep->dfs_link); |
52e54209 CW |
134 | list_add(&dep->wait_link, &signal->waiters_list); |
135 | list_add(&dep->signal_link, &pt->signalers_list); | |
136 | dep->signaler = signal; | |
137 | dep->flags = flags; | |
138 | } | |
139 | ||
140 | static int | |
141 | i915_priotree_add_dependency(struct drm_i915_private *i915, | |
142 | struct i915_priotree *pt, | |
143 | struct i915_priotree *signal) | |
144 | { | |
145 | struct i915_dependency *dep; | |
146 | ||
147 | dep = i915_dependency_alloc(i915); | |
148 | if (!dep) | |
149 | return -ENOMEM; | |
150 | ||
151 | __i915_priotree_add_dependency(pt, signal, dep, I915_DEPENDENCY_ALLOC); | |
152 | return 0; | |
153 | } | |
154 | ||
155 | static void | |
156 | i915_priotree_fini(struct drm_i915_private *i915, struct i915_priotree *pt) | |
157 | { | |
158 | struct i915_dependency *dep, *next; | |
159 | ||
6c067579 | 160 | GEM_BUG_ON(!list_empty(&pt->link)); |
20311bd3 | 161 | |
83cc84c5 CW |
162 | /* |
163 | * Everyone we depended upon (the fences we wait to be signaled) | |
52e54209 CW |
164 | * should retire before us and remove themselves from our list. |
165 | * However, retirement is run independently on each timeline and | |
166 | * so we may be called out-of-order. | |
167 | */ | |
168 | list_for_each_entry_safe(dep, next, &pt->signalers_list, signal_link) { | |
83cc84c5 CW |
169 | GEM_BUG_ON(!i915_priotree_signaled(dep->signaler)); |
170 | GEM_BUG_ON(!list_empty(&dep->dfs_link)); | |
171 | ||
52e54209 CW |
172 | list_del(&dep->wait_link); |
173 | if (dep->flags & I915_DEPENDENCY_ALLOC) | |
174 | i915_dependency_free(i915, dep); | |
175 | } | |
176 | ||
177 | /* Remove ourselves from everyone who depends upon us */ | |
178 | list_for_each_entry_safe(dep, next, &pt->waiters_list, wait_link) { | |
83cc84c5 CW |
179 | GEM_BUG_ON(dep->signaler != pt); |
180 | GEM_BUG_ON(!list_empty(&dep->dfs_link)); | |
181 | ||
52e54209 CW |
182 | list_del(&dep->signal_link); |
183 | if (dep->flags & I915_DEPENDENCY_ALLOC) | |
184 | i915_dependency_free(i915, dep); | |
185 | } | |
186 | } | |
187 | ||
188 | static void | |
189 | i915_priotree_init(struct i915_priotree *pt) | |
190 | { | |
191 | INIT_LIST_HEAD(&pt->signalers_list); | |
192 | INIT_LIST_HEAD(&pt->waiters_list); | |
6c067579 | 193 | INIT_LIST_HEAD(&pt->link); |
7d1ea609 | 194 | pt->priority = I915_PRIORITY_INVALID; |
52e54209 CW |
195 | } |
196 | ||
12d3173b CW |
197 | static int reset_all_global_seqno(struct drm_i915_private *i915, u32 seqno) |
198 | { | |
12d3173b CW |
199 | struct intel_engine_cs *engine; |
200 | enum intel_engine_id id; | |
201 | int ret; | |
202 | ||
203 | /* Carefully retire all requests without writing to the rings */ | |
204 | ret = i915_gem_wait_for_idle(i915, | |
205 | I915_WAIT_INTERRUPTIBLE | | |
206 | I915_WAIT_LOCKED); | |
207 | if (ret) | |
208 | return ret; | |
209 | ||
d9b13c4d CW |
210 | GEM_BUG_ON(i915->gt.active_requests); |
211 | ||
12d3173b CW |
212 | /* If the seqno wraps around, we need to clear the breadcrumb rbtree */ |
213 | for_each_engine(engine, i915, id) { | |
ae351beb CW |
214 | struct i915_gem_timeline *timeline; |
215 | struct intel_timeline *tl = engine->timeline; | |
12d3173b | 216 | |
d9b13c4d CW |
217 | GEM_TRACE("%s seqno %d -> %d\n", |
218 | engine->name, tl->seqno, seqno); | |
219 | ||
12d3173b | 220 | if (!i915_seqno_passed(seqno, tl->seqno)) { |
f41d19be CW |
221 | /* Flush any waiters before we reuse the seqno */ |
222 | intel_engine_disarm_breadcrumbs(engine); | |
93eef7d6 | 223 | GEM_BUG_ON(!list_empty(&engine->breadcrumbs.signals)); |
12d3173b CW |
224 | } |
225 | ||
4d53568c CW |
226 | /* Check we are idle before we fiddle with hw state! */ |
227 | GEM_BUG_ON(!intel_engine_is_idle(engine)); | |
228 | GEM_BUG_ON(i915_gem_active_isset(&engine->timeline->last_request)); | |
229 | ||
12d3173b | 230 | /* Finally reset hw state */ |
12d3173b | 231 | intel_engine_init_global_seqno(engine, seqno); |
2ca9faa5 | 232 | tl->seqno = seqno; |
12d3173b | 233 | |
ae351beb | 234 | list_for_each_entry(timeline, &i915->gt.timelines, link) |
7e8894e9 CW |
235 | memset(timeline->engine[id].global_sync, 0, |
236 | sizeof(timeline->engine[id].global_sync)); | |
12d3173b CW |
237 | } |
238 | ||
239 | return 0; | |
240 | } | |
241 | ||
242 | int i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno) | |
243 | { | |
e61e0f51 | 244 | struct drm_i915_private *i915 = to_i915(dev); |
12d3173b | 245 | |
e61e0f51 | 246 | lockdep_assert_held(&i915->drm.struct_mutex); |
12d3173b CW |
247 | |
248 | if (seqno == 0) | |
249 | return -EINVAL; | |
250 | ||
e61e0f51 CW |
251 | /* The seqno in the HWS page needs to be less than what we will inject into the ring */ |
252 | return reset_all_global_seqno(i915, seqno - 1); | |
12d3173b CW |
253 | } |
254 | ||
636918f1 | 255 | static void mark_busy(struct drm_i915_private *i915) |
12d3173b | 256 | { |
636918f1 CW |
257 | if (i915->gt.awake) |
258 | return; | |
259 | ||
260 | GEM_BUG_ON(!i915->gt.active_requests); | |
261 | ||
262 | intel_runtime_pm_get_noresume(i915); | |
b6876374 TU |
263 | |
264 | /* | |
265 | * It seems that the DMC likes to transition between the DC states a lot | |
266 | * when there are no connected displays (no active power domains) during | |
267 | * command submission. | |
268 | * | |
269 | * This activity has a negative impact on the performance of the chip, with |
270 | * huge latencies observed in the interrupt handler and elsewhere. | |
271 | * | |
272 | * Work around it by grabbing a GT IRQ power domain whilst there is any | |
273 | * GT activity, preventing any DC state transitions. | |
274 | */ | |
275 | intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ); | |
276 | ||
636918f1 | 277 | i915->gt.awake = true; |
6f56103d CW |
278 | if (unlikely(++i915->gt.epoch == 0)) /* keep 0 as invalid */ |
279 | i915->gt.epoch = 1; | |
636918f1 CW |
280 | |
281 | intel_enable_gt_powersave(i915); | |
282 | i915_update_gfx_val(i915); | |
283 | if (INTEL_GEN(i915) >= 6) | |
284 | gen6_rps_busy(i915); | |
feff0dc6 | 285 | i915_pmu_gt_unparked(i915); |
636918f1 | 286 | |
aba5e278 CW |
287 | intel_engines_unpark(i915); |
288 | ||
88923048 CW |
289 | i915_queue_hangcheck(i915); |
290 | ||
636918f1 CW |
291 | queue_delayed_work(i915->wq, |
292 | &i915->gt.retire_work, | |
293 | round_jiffies_up_relative(HZ)); | |
294 | } | |
295 | ||
296 | static int reserve_engine(struct intel_engine_cs *engine) | |
297 | { | |
298 | struct drm_i915_private *i915 = engine->i915; | |
12d3173b CW |
299 | u32 active = ++engine->timeline->inflight_seqnos; |
300 | u32 seqno = engine->timeline->seqno; | |
301 | int ret; | |
302 | ||
303 | /* Reservation is fine until we need to wrap around */ | |
636918f1 CW |
304 | if (unlikely(add_overflows(seqno, active))) { |
305 | ret = reset_all_global_seqno(i915, 0); | |
306 | if (ret) { | |
307 | engine->timeline->inflight_seqnos--; | |
308 | return ret; | |
309 | } | |
12d3173b CW |
310 | } |
311 | ||
636918f1 CW |
312 | if (!i915->gt.active_requests++) |
313 | mark_busy(i915); | |
314 | ||
12d3173b CW |
315 | return 0; |
316 | } | |
317 | ||
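As an aside, the wrap test above can be restated in plain u32 arithmetic. The helper below is purely illustrative (seqno_would_wrap() is not part of the driver); it captures the condition that add_overflows() guards, i.e. when the reservation would push the seqno past the 32-bit limit and reset_all_global_seqno() must run first.

```c
#include <linux/kernel.h>
#include <linux/types.h>

/* Hypothetical restatement: would seqno + active overflow a u32? */
static inline bool seqno_would_wrap(u32 seqno, u32 active)
{
	return active > U32_MAX - seqno;
}
```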
636918f1 | 318 | static void unreserve_engine(struct intel_engine_cs *engine) |
9b6586ae | 319 | { |
636918f1 CW |
320 | struct drm_i915_private *i915 = engine->i915; |
321 | ||
322 | if (!--i915->gt.active_requests) { | |
323 | /* Cancel the mark_busy() from our reserve_engine() */ | |
324 | GEM_BUG_ON(!i915->gt.awake); | |
325 | mod_delayed_work(i915->wq, | |
326 | &i915->gt.idle_work, | |
327 | msecs_to_jiffies(100)); | |
328 | } | |
329 | ||
9b6586ae CW |
330 | GEM_BUG_ON(!engine->timeline->inflight_seqnos); |
331 | engine->timeline->inflight_seqnos--; | |
332 | } | |
333 | ||
fa545cbf | 334 | void i915_gem_retire_noop(struct i915_gem_active *active, |
e61e0f51 | 335 | struct i915_request *request) |
fa545cbf CW |
336 | { |
337 | /* Space left intentionally blank */ | |
338 | } | |
339 | ||
e61e0f51 | 340 | static void advance_ring(struct i915_request *request) |
cbb60b4b CW |
341 | { |
342 | unsigned int tail; | |
343 | ||
e61e0f51 CW |
344 | /* |
345 | * We know the GPU must have read the request to have | |
cbb60b4b CW |
346 | * sent us the seqno + interrupt, so use the position |
347 | * of the tail of the request to update the last known position |
348 | * of the GPU head. | |
349 | * | |
350 | * Note this requires that we are always called in request | |
351 | * completion order. | |
352 | */ | |
e6ba9992 | 353 | if (list_is_last(&request->ring_link, &request->ring->request_list)) { |
e61e0f51 CW |
354 | /* |
355 | * We may race here with execlists resubmitting this request | |
e6ba9992 CW |
356 | * as we retire it. The resubmission will move the ring->tail |
357 | * forwards (to request->wa_tail). We either read the | |
358 | * current value that was written to hw, or the value that | |
359 | * is just about to be. Either works, if we miss the last two | |
360 | * noops - they are safe to be replayed on a reset. | |
361 | */ | |
36620032 | 362 | tail = READ_ONCE(request->tail); |
e6ba9992 | 363 | } else { |
cbb60b4b | 364 | tail = request->postfix; |
e6ba9992 | 365 | } |
cbb60b4b CW |
366 | list_del(&request->ring_link); |
367 | ||
368 | request->ring->head = tail; | |
369 | } | |
370 | ||
e61e0f51 | 371 | static void free_capture_list(struct i915_request *request) |
b0fd47ad | 372 | { |
e61e0f51 | 373 | struct i915_capture_list *capture; |
b0fd47ad CW |
374 | |
375 | capture = request->capture_list; | |
376 | while (capture) { | |
e61e0f51 | 377 | struct i915_capture_list *next = capture->next; |
b0fd47ad CW |
378 | |
379 | kfree(capture); | |
380 | capture = next; | |
381 | } | |
382 | } | |
383 | ||
e61e0f51 | 384 | static void i915_request_retire(struct i915_request *request) |
05235c53 | 385 | { |
e8a9c58f | 386 | struct intel_engine_cs *engine = request->engine; |
fa545cbf CW |
387 | struct i915_gem_active *active, *next; |
388 | ||
d9b13c4d CW |
389 | GEM_TRACE("%s(%d) fence %llx:%d, global_seqno %d\n", |
390 | engine->name, intel_engine_get_seqno(engine), | |
391 | request->fence.context, request->fence.seqno, | |
392 | request->global_seqno); | |
393 | ||
4c7d62c6 | 394 | lockdep_assert_held(&request->i915->drm.struct_mutex); |
48bc2a4a | 395 | GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit)); |
e61e0f51 | 396 | GEM_BUG_ON(!i915_request_completed(request)); |
4302055b | 397 | GEM_BUG_ON(!request->i915->gt.active_requests); |
4c7d62c6 | 398 | |
e61e0f51 | 399 | trace_i915_request_retire(request); |
80b204bc | 400 | |
e8a9c58f | 401 | spin_lock_irq(&engine->timeline->lock); |
e95433c7 | 402 | list_del_init(&request->link); |
e8a9c58f | 403 | spin_unlock_irq(&engine->timeline->lock); |
05235c53 | 404 | |
636918f1 | 405 | unreserve_engine(request->engine); |
cbb60b4b | 406 | advance_ring(request); |
05235c53 | 407 | |
b0fd47ad CW |
408 | free_capture_list(request); |
409 | ||
e61e0f51 CW |
410 | /* |
411 | * Walk through the active list, calling retire on each. This allows | |
fa545cbf CW |
412 | * objects to track their GPU activity and mark themselves as idle |
413 | * when their *last* active request is completed (updating state | |
414 | * tracking lists for eviction, active references for GEM, etc). | |
415 | * | |
416 | * As the ->retire() may free the node, we decouple it first and | |
417 | * pass along the auxiliary information (to avoid dereferencing | |
418 | * the node after the callback). | |
419 | */ | |
420 | list_for_each_entry_safe(active, next, &request->active_list, link) { | |
e61e0f51 CW |
421 | /* |
422 | * In microbenchmarks, or when focusing upon time inside the kernel, |
fa545cbf CW |
423 | * we may spend an inordinate amount of time simply handling |
424 | * the retirement of requests and processing their callbacks. | |
425 | * Of which, this loop itself is particularly hot due to the | |
426 | * cache misses when jumping around the list of i915_gem_active. | |
427 | * So we try to keep this loop as streamlined as possible and | |
428 | * also prefetch the next i915_gem_active to try and hide | |
429 | * the likely cache miss. | |
430 | */ | |
431 | prefetchw(next); | |
432 | ||
433 | INIT_LIST_HEAD(&active->link); | |
0eafec6d | 434 | RCU_INIT_POINTER(active->request, NULL); |
fa545cbf CW |
435 | |
436 | active->retire(active, request); | |
437 | } | |
438 | ||
e61e0f51 | 439 | i915_request_remove_from_client(request); |
05235c53 | 440 | |
e5e1fc47 | 441 | /* Retirement decays the ban score as it is a sign of ctx progress */ |
77b25a97 | 442 | atomic_dec_if_positive(&request->ctx->ban_score); |
e5e1fc47 | 443 | |
e61e0f51 CW |
444 | /* |
445 | * The backing object for the context is done after switching to the | |
e8a9c58f CW |
446 | * *next* context. Therefore we cannot retire the previous context until |
447 | * the next context has already started running. However, since we | |
e61e0f51 | 448 | * cannot take the required locks at i915_request_submit() we |
e8a9c58f CW |
449 | * defer the unpinning of the active context to now, retirement of |
450 | * the subsequent request. | |
451 | */ | |
452 | if (engine->last_retired_context) | |
453 | engine->context_unpin(engine, engine->last_retired_context); | |
454 | engine->last_retired_context = request->ctx; | |
d07f0e59 | 455 | |
7b92c1bd | 456 | spin_lock_irq(&request->lock); |
b7a3f33b CW |
457 | if (!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &request->fence.flags)) |
458 | dma_fence_signal_locked(&request->fence); | |
459 | if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags)) | |
460 | intel_engine_cancel_signaling(request); | |
253a2817 CW |
461 | if (request->waitboost) { |
462 | GEM_BUG_ON(!atomic_read(&request->i915->gt_pm.rps.num_waiters)); | |
463 | atomic_dec(&request->i915->gt_pm.rps.num_waiters); | |
464 | } | |
7b92c1bd | 465 | spin_unlock_irq(&request->lock); |
52e54209 CW |
466 | |
467 | i915_priotree_fini(request->i915, &request->priotree); | |
e61e0f51 | 468 | i915_request_put(request); |
05235c53 CW |
469 | } |
470 | ||
e61e0f51 | 471 | void i915_request_retire_upto(struct i915_request *rq) |
05235c53 | 472 | { |
e61e0f51 CW |
473 | struct intel_engine_cs *engine = rq->engine; |
474 | struct i915_request *tmp; | |
05235c53 | 475 | |
e61e0f51 CW |
476 | lockdep_assert_held(&rq->i915->drm.struct_mutex); |
477 | GEM_BUG_ON(!i915_request_completed(rq)); | |
4ffd6e0c | 478 | |
e61e0f51 | 479 | if (list_empty(&rq->link)) |
e95433c7 | 480 | return; |
05235c53 CW |
481 | |
482 | do { | |
73cb9701 | 483 | tmp = list_first_entry(&engine->timeline->requests, |
efdf7c06 | 484 | typeof(*tmp), link); |
05235c53 | 485 | |
e61e0f51 CW |
486 | i915_request_retire(tmp); |
487 | } while (tmp != rq); | |
05235c53 CW |
488 | } |
489 | ||
9b6586ae | 490 | static u32 timeline_get_seqno(struct intel_timeline *tl) |
05235c53 | 491 | { |
9b6586ae | 492 | return ++tl->seqno; |
28176ef4 CW |
493 | } |
494 | ||
4ccfee92 CW |
495 | static void move_to_timeline(struct i915_request *request, |
496 | struct intel_timeline *timeline) | |
497 | { | |
498 | GEM_BUG_ON(request->timeline == request->engine->timeline); | |
499 | lockdep_assert_held(&request->engine->timeline->lock); | |
500 | ||
501 | spin_lock(&request->timeline->lock); | |
502 | list_move_tail(&request->link, &timeline->requests); | |
503 | spin_unlock(&request->timeline->lock); | |
504 | } | |
505 | ||
e61e0f51 | 506 | void __i915_request_submit(struct i915_request *request) |
5590af3e | 507 | { |
73cb9701 | 508 | struct intel_engine_cs *engine = request->engine; |
f2d13290 | 509 | u32 seqno; |
5590af3e | 510 | |
d9b13c4d CW |
511 | GEM_TRACE("%s fence %llx:%d -> global_seqno %d\n", |
512 | request->engine->name, | |
513 | request->fence.context, request->fence.seqno, | |
0e59c209 | 514 | engine->timeline->seqno + 1); |
d9b13c4d | 515 | |
e60a870d | 516 | GEM_BUG_ON(!irqs_disabled()); |
67520415 | 517 | lockdep_assert_held(&engine->timeline->lock); |
e60a870d | 518 | |
2d453c78 | 519 | GEM_BUG_ON(request->global_seqno); |
5590af3e | 520 | |
4ccfee92 | 521 | seqno = timeline_get_seqno(engine->timeline); |
f2d13290 CW |
522 | GEM_BUG_ON(!seqno); |
523 | GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), seqno)); | |
524 | ||
f2d13290 CW |
525 | /* We may be recursing from the signal callback of another i915 fence */ |
526 | spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING); | |
527 | request->global_seqno = seqno; | |
528 | if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags)) | |
f7b02a52 | 529 | intel_engine_enable_signaling(request, false); |
f2d13290 CW |
530 | spin_unlock(&request->lock); |
531 | ||
caddfe71 CW |
532 | engine->emit_breadcrumb(request, |
533 | request->ring->vaddr + request->postfix); | |
5590af3e | 534 | |
4ccfee92 CW |
535 | /* Transfer from per-context onto the global per-engine timeline */ |
536 | move_to_timeline(request, engine->timeline); | |
80b204bc | 537 | |
e61e0f51 | 538 | trace_i915_request_execute(request); |
158863fb | 539 | |
fe49789f | 540 | wake_up_all(&request->execute); |
d55ac5bf CW |
541 | } |
542 | ||
e61e0f51 | 543 | void i915_request_submit(struct i915_request *request) |
d55ac5bf CW |
544 | { |
545 | struct intel_engine_cs *engine = request->engine; | |
546 | unsigned long flags; | |
23902e49 | 547 | |
d55ac5bf CW |
548 | /* Will be called from irq-context when using foreign fences. */ |
549 | spin_lock_irqsave(&engine->timeline->lock, flags); | |
550 | ||
e61e0f51 | 551 | __i915_request_submit(request); |
d55ac5bf CW |
552 | |
553 | spin_unlock_irqrestore(&engine->timeline->lock, flags); | |
554 | } | |
555 | ||
e61e0f51 | 556 | void __i915_request_unsubmit(struct i915_request *request) |
d55ac5bf | 557 | { |
d6a2289d | 558 | struct intel_engine_cs *engine = request->engine; |
d55ac5bf | 559 | |
d9b13c4d CW |
560 | GEM_TRACE("%s fence %llx:%d <- global_seqno %d\n", |
561 | request->engine->name, | |
562 | request->fence.context, request->fence.seqno, | |
563 | request->global_seqno); | |
564 | ||
e60a870d | 565 | GEM_BUG_ON(!irqs_disabled()); |
67520415 | 566 | lockdep_assert_held(&engine->timeline->lock); |
48bc2a4a | 567 | |
e61e0f51 CW |
568 | /* |
569 | * Only unwind in reverse order, required so that the per-context list | |
d6a2289d CW |
570 | * is kept in seqno/ring order. |
571 | */ | |
2d453c78 | 572 | GEM_BUG_ON(!request->global_seqno); |
d6a2289d | 573 | GEM_BUG_ON(request->global_seqno != engine->timeline->seqno); |
c7cc144d CW |
574 | GEM_BUG_ON(i915_seqno_passed(intel_engine_get_seqno(engine), |
575 | request->global_seqno)); | |
d6a2289d | 576 | engine->timeline->seqno--; |
80b204bc | 577 | |
d6a2289d CW |
578 | /* We may be recursing from the signal callback of another i915 fence */ |
579 | spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING); | |
580 | request->global_seqno = 0; | |
581 | if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags)) | |
582 | intel_engine_cancel_signaling(request); | |
583 | spin_unlock(&request->lock); | |
584 | ||
585 | /* Transfer back from the global per-engine timeline to per-context */ | |
4ccfee92 | 586 | move_to_timeline(request, request->timeline); |
d6a2289d | 587 | |
e61e0f51 CW |
588 | /* |
589 | * We don't need to wake_up any waiters on request->execute, they | |
d6a2289d | 590 | * will get woken by any other event or us re-adding this request |
e61e0f51 | 591 | * to the engine timeline (__i915_request_submit()). The waiters |
d6a2289d CW |
592 | * should be quite adept at finding that the request now has a new |
593 | * global_seqno compared to the one they went to sleep on. |
594 | */ | |
595 | } | |
596 | ||
e61e0f51 | 597 | void i915_request_unsubmit(struct i915_request *request) |
d6a2289d CW |
598 | { |
599 | struct intel_engine_cs *engine = request->engine; | |
600 | unsigned long flags; | |
601 | ||
602 | /* Will be called from irq-context when using foreign fences. */ | |
603 | spin_lock_irqsave(&engine->timeline->lock, flags); | |
604 | ||
e61e0f51 | 605 | __i915_request_unsubmit(request); |
d6a2289d CW |
606 | |
607 | spin_unlock_irqrestore(&engine->timeline->lock, flags); | |
5590af3e CW |
608 | } |
609 | ||
23902e49 | 610 | static int __i915_sw_fence_call |
d55ac5bf | 611 | submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state) |
23902e49 | 612 | { |
e61e0f51 | 613 | struct i915_request *request = |
48bc2a4a | 614 | container_of(fence, typeof(*request), submit); |
48bc2a4a CW |
615 | |
616 | switch (state) { | |
617 | case FENCE_COMPLETE: | |
e61e0f51 | 618 | trace_i915_request_submit(request); |
af7a8ffa | 619 | /* |
e61e0f51 CW |
620 | * We need to serialize use of the submit_request() callback |
621 | * with its hotplugging performed during an emergency | |
622 | * i915_gem_set_wedged(). We use the RCU mechanism to mark the | |
623 | * critical section in order to force i915_gem_set_wedged() to | |
624 | * wait until the submit_request() is completed before | |
625 | * proceeding. | |
af7a8ffa DV |
626 | */ |
627 | rcu_read_lock(); | |
d55ac5bf | 628 | request->engine->submit_request(request); |
af7a8ffa | 629 | rcu_read_unlock(); |
48bc2a4a CW |
630 | break; |
631 | ||
632 | case FENCE_FREE: | |
e61e0f51 | 633 | i915_request_put(request); |
48bc2a4a CW |
634 | break; |
635 | } | |
636 | ||
23902e49 CW |
637 | return NOTIFY_DONE; |
638 | } | |
639 | ||
8e637178 | 640 | /** |
e61e0f51 | 641 | * i915_request_alloc - allocate a request structure |
8e637178 CW |
642 | * |
643 | * @engine: engine that we wish to issue the request on. | |
644 | * @ctx: context that the request will be associated with. | |
8e637178 CW |
645 | * |
646 | * Returns a pointer to the allocated request if successful, | |
647 | * or an error code if not. | |
648 | */ | |
e61e0f51 CW |
649 | struct i915_request * |
650 | i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx) | |
05235c53 | 651 | { |
e61e0f51 CW |
652 | struct drm_i915_private *i915 = engine->i915; |
653 | struct i915_request *rq; | |
266a240b | 654 | struct intel_ring *ring; |
05235c53 CW |
655 | int ret; |
656 | ||
e61e0f51 | 657 | lockdep_assert_held(&i915->drm.struct_mutex); |
28176ef4 | 658 | |
e7af3116 CW |
659 | /* |
660 | * Preempt contexts are reserved for exclusive use to inject a | |
661 | * preemption context switch. They are never to be used for any trivial | |
662 | * request! | |
663 | */ | |
e61e0f51 | 664 | GEM_BUG_ON(ctx == i915->preempt_context); |
e7af3116 | 665 | |
e61e0f51 CW |
666 | /* |
667 | * ABI: Before userspace accesses the GPU (e.g. execbuffer), report | |
6ffb7d07 | 668 | * EIO if the GPU is already wedged. |
05235c53 | 669 | */ |
e61e0f51 | 670 | if (i915_terminally_wedged(&i915->gpu_error)) |
6ffb7d07 | 671 | return ERR_PTR(-EIO); |
05235c53 | 672 | |
e61e0f51 CW |
673 | /* |
674 | * Pinning the contexts may generate requests in order to acquire | |
e8a9c58f CW |
675 | * GGTT space, so do this first before we reserve a seqno for |
676 | * ourselves. | |
677 | */ | |
266a240b CW |
678 | ring = engine->context_pin(engine, ctx); |
679 | if (IS_ERR(ring)) | |
680 | return ERR_CAST(ring); | |
681 | GEM_BUG_ON(!ring); | |
28176ef4 | 682 | |
636918f1 | 683 | ret = reserve_engine(engine); |
e8a9c58f CW |
684 | if (ret) |
685 | goto err_unpin; | |
686 | ||
3fef5cda CW |
687 | ret = intel_ring_wait_for_space(ring, MIN_SPACE_FOR_ADD_REQUEST); |
688 | if (ret) | |
689 | goto err_unreserve; | |
690 | ||
9b5f4e5e | 691 | /* Move the oldest request to the slab-cache (if not in use!) */ |
e61e0f51 CW |
692 | rq = list_first_entry_or_null(&engine->timeline->requests, |
693 | typeof(*rq), link); | |
694 | if (rq && i915_request_completed(rq)) | |
695 | i915_request_retire(rq); | |
9b5f4e5e | 696 | |
e61e0f51 CW |
697 | /* |
698 | * Beware: Dragons be flying overhead. | |
5a198b8c CW |
699 | * |
700 | * We use RCU to look up requests in flight. The lookups may | |
701 | * race with the request being allocated from the slab freelist. | |
702 | * That is, the request we are writing to here may be in the process |
1426f715 | 703 | * of being read by __i915_gem_active_get_rcu(). As such, |
5a198b8c CW |
704 | * we have to be very careful when overwriting the contents. During |
705 | * the RCU lookup, we chase the request->engine pointer, |
65e4760e | 706 | * read the request->global_seqno and increment the reference count. |
5a198b8c CW |
707 | * |
708 | * The reference count is incremented atomically. If it is zero, | |
709 | * the lookup knows the request is unallocated and complete. Otherwise, | |
710 | * it is either still in use, or has been reallocated and reset | |
f54d1867 CW |
711 | * with dma_fence_init(). This increment is safe for release as we |
712 | * check that the request we have a reference to matches the active |
5a198b8c CW |
713 | * request. |
714 | * | |
715 | * Before we increment the refcount, we chase the request->engine | |
716 | * pointer. We must not call kmem_cache_zalloc() or else we set | |
717 | * that pointer to NULL and cause a crash during the lookup. If | |
718 | * we see the request is completed (based on the value of the | |
719 | * old engine and seqno), the lookup is complete and reports NULL. | |
720 | * If we decide the request is not completed (new engine or seqno), | |
721 | * then we grab a reference and double check that it is still the | |
722 | * active request - which it won't be - and restart the lookup. |
723 | * | |
724 | * Do not use kmem_cache_zalloc() here! | |
725 | */ | |
e61e0f51 CW |
726 | rq = kmem_cache_alloc(i915->requests, |
727 | GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN); | |
728 | if (unlikely(!rq)) { | |
31c70f97 | 729 | /* Ratelimit ourselves to prevent oom from malicious clients */ |
e61e0f51 | 730 | ret = i915_gem_wait_for_idle(i915, |
31c70f97 CW |
731 | I915_WAIT_LOCKED | |
732 | I915_WAIT_INTERRUPTIBLE); | |
733 | if (ret) | |
734 | goto err_unreserve; | |
735 | ||
f0111b04 CW |
736 | /* |
737 | * We've forced the client to stall and catch up with whatever | |
738 | * backlog there might have been. As we are assuming that we | |
739 | * caused the mempressure, now is an opportune time to | |
740 | * recover as much memory from the request pool as is possible. | |
741 | * Having already penalized the client to stall, we spend | |
742 | * a little extra time to re-optimise page allocation. | |
743 | */ | |
e61e0f51 | 744 | kmem_cache_shrink(i915->requests); |
f0111b04 CW |
745 | rcu_barrier(); /* Recover the TYPESAFE_BY_RCU pages */ |
746 | ||
e61e0f51 CW |
747 | rq = kmem_cache_alloc(i915->requests, GFP_KERNEL); |
748 | if (!rq) { | |
31c70f97 CW |
749 | ret = -ENOMEM; |
750 | goto err_unreserve; | |
751 | } | |
28176ef4 | 752 | } |
05235c53 | 753 | |
e61e0f51 CW |
754 | rq->timeline = i915_gem_context_lookup_timeline(ctx, engine); |
755 | GEM_BUG_ON(rq->timeline == engine->timeline); | |
73cb9701 | 756 | |
e61e0f51 CW |
757 | spin_lock_init(&rq->lock); |
758 | dma_fence_init(&rq->fence, | |
f54d1867 | 759 | &i915_fence_ops, |
e61e0f51 CW |
760 | &rq->lock, |
761 | rq->timeline->fence_context, | |
762 | timeline_get_seqno(rq->timeline)); | |
04769652 | 763 | |
48bc2a4a | 764 | /* We bump the ref for the fence chain */ |
e61e0f51 CW |
765 | i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify); |
766 | init_waitqueue_head(&rq->execute); | |
5590af3e | 767 | |
e61e0f51 | 768 | i915_priotree_init(&rq->priotree); |
52e54209 | 769 | |
e61e0f51 CW |
770 | INIT_LIST_HEAD(&rq->active_list); |
771 | rq->i915 = i915; | |
772 | rq->engine = engine; | |
773 | rq->ctx = ctx; | |
774 | rq->ring = ring; | |
05235c53 | 775 | |
5a198b8c | 776 | /* No zalloc, must clear what we need by hand */ |
e61e0f51 CW |
777 | rq->global_seqno = 0; |
778 | rq->signaling.wait.seqno = 0; | |
779 | rq->file_priv = NULL; | |
780 | rq->batch = NULL; | |
781 | rq->capture_list = NULL; | |
782 | rq->waitboost = false; | |
5a198b8c | 783 | |
05235c53 CW |
784 | /* |
785 | * Reserve space in the ring buffer for all the commands required to | |
786 | * eventually emit this request. This is to guarantee that the | |
e61e0f51 | 787 | * i915_request_add() call can't fail. Note that the reserve may need |
05235c53 CW |
788 | * to be redone if the request is not actually submitted straight |
789 | * away, e.g. because a GPU scheduler has deferred it. | |
790 | */ | |
e61e0f51 CW |
791 | rq->reserved_space = MIN_SPACE_FOR_ADD_REQUEST; |
792 | GEM_BUG_ON(rq->reserved_space < engine->emit_breadcrumb_sz); | |
05235c53 | 793 | |
2113184c CW |
794 | /* |
795 | * Record the position of the start of the request so that | |
d045446d CW |
796 | * should we detect the updated seqno part-way through the |
797 | * GPU processing the request, we never over-estimate the | |
798 | * position of the head. | |
799 | */ | |
e61e0f51 | 800 | rq->head = rq->ring->emit; |
d045446d | 801 | |
2113184c | 802 | /* Unconditionally invalidate GPU caches and TLBs. */ |
e61e0f51 | 803 | ret = engine->emit_flush(rq, EMIT_INVALIDATE); |
2113184c | 804 | if (ret) |
b1c24a61 | 805 | goto err_unwind; |
2113184c | 806 | |
e61e0f51 | 807 | ret = engine->request_alloc(rq); |
b1c24a61 CW |
808 | if (ret) |
809 | goto err_unwind; | |
2113184c | 810 | |
9b6586ae | 811 | /* Check that we didn't interrupt ourselves with a new request */ |
e61e0f51 CW |
812 | GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno); |
813 | return rq; | |
05235c53 | 814 | |
b1c24a61 | 815 | err_unwind: |
e61e0f51 | 816 | rq->ring->emit = rq->head; |
b1c24a61 | 817 | |
1618bdb8 | 818 | /* Make sure we didn't add ourselves to external state before freeing */ |
e61e0f51 CW |
819 | GEM_BUG_ON(!list_empty(&rq->active_list)); |
820 | GEM_BUG_ON(!list_empty(&rq->priotree.signalers_list)); | |
821 | GEM_BUG_ON(!list_empty(&rq->priotree.waiters_list)); | |
1618bdb8 | 822 | |
e61e0f51 | 823 | kmem_cache_free(i915->requests, rq); |
28176ef4 | 824 | err_unreserve: |
636918f1 | 825 | unreserve_engine(engine); |
e8a9c58f CW |
826 | err_unpin: |
827 | engine->context_unpin(engine, ctx); | |
8e637178 | 828 | return ERR_PTR(ret); |
05235c53 CW |
829 | } |
830 | ||
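A rough sketch of the expected calling pattern for the allocator, with struct_mutex held, follows. example_submit_nop() is hypothetical; the intel_ring_begin()/intel_ring_advance() pairing mirrors the one used by __i915_request_add() later in this file, and even on the emission error path the request is still added, since a request must not be abandoned once allocated.

```c
/* Illustrative only: build and queue a trivial request on @engine for @ctx. */
static int example_submit_nop(struct intel_engine_cs *engine,
			      struct i915_gem_context *ctx)
{
	struct i915_request *rq;
	u32 *cs;

	rq = i915_request_alloc(engine, ctx);
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	cs = intel_ring_begin(rq, 2);
	if (IS_ERR(cs)) {
		__i915_request_add(rq, false);
		return PTR_ERR(cs);
	}
	*cs++ = MI_NOOP;
	*cs++ = MI_NOOP;
	intel_ring_advance(rq, cs);

	/* Seals the request and hands it to the backend for execution. */
	__i915_request_add(rq, true);
	return 0;
}
```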
a2bc4695 | 831 | static int |
e61e0f51 | 832 | i915_request_await_request(struct i915_request *to, struct i915_request *from) |
a2bc4695 | 833 | { |
85e17f59 | 834 | int ret; |
a2bc4695 CW |
835 | |
836 | GEM_BUG_ON(to == from); | |
ceae14bd | 837 | GEM_BUG_ON(to->timeline == from->timeline); |
a2bc4695 | 838 | |
e61e0f51 | 839 | if (i915_request_completed(from)) |
ade0b0c9 CW |
840 | return 0; |
841 | ||
52e54209 CW |
842 | if (to->engine->schedule) { |
843 | ret = i915_priotree_add_dependency(to->i915, | |
844 | &to->priotree, | |
845 | &from->priotree); | |
846 | if (ret < 0) | |
847 | return ret; | |
848 | } | |
849 | ||
73cb9701 CW |
850 | if (to->engine == from->engine) { |
851 | ret = i915_sw_fence_await_sw_fence_gfp(&to->submit, | |
852 | &from->submit, | |
2abe2f84 | 853 | I915_FENCE_GFP); |
73cb9701 CW |
854 | return ret < 0 ? ret : 0; |
855 | } | |
856 | ||
6b567085 CW |
857 | if (to->engine->semaphore.sync_to) { |
858 | u32 seqno; | |
65e4760e | 859 | |
6b567085 | 860 | GEM_BUG_ON(!from->engine->semaphore.signal); |
fc9d4d2b | 861 | |
e61e0f51 | 862 | seqno = i915_request_global_seqno(from); |
6b567085 | 863 | if (!seqno) |
fc9d4d2b | 864 | goto await_dma_fence; |
49f08598 | 865 | |
fc9d4d2b CW |
866 | if (seqno <= to->timeline->global_sync[from->engine->id]) |
867 | return 0; | |
868 | ||
869 | trace_i915_gem_ring_sync_to(to, from); | |
a2bc4695 CW |
870 | ret = to->engine->semaphore.sync_to(to, from); |
871 | if (ret) | |
872 | return ret; | |
fc9d4d2b CW |
873 | |
874 | to->timeline->global_sync[from->engine->id] = seqno; | |
6b567085 | 875 | return 0; |
a2bc4695 CW |
876 | } |
877 | ||
fc9d4d2b CW |
878 | await_dma_fence: |
879 | ret = i915_sw_fence_await_dma_fence(&to->submit, | |
880 | &from->fence, 0, | |
2abe2f84 | 881 | I915_FENCE_GFP); |
fc9d4d2b | 882 | return ret < 0 ? ret : 0; |
a2bc4695 CW |
883 | } |
884 | ||
b52992c0 | 885 | int |
e61e0f51 | 886 | i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence) |
b52992c0 | 887 | { |
29ef3fa9 CW |
888 | struct dma_fence **child = &fence; |
889 | unsigned int nchild = 1; | |
b52992c0 | 890 | int ret; |
b52992c0 | 891 | |
e61e0f51 CW |
892 | /* |
893 | * Note that if the fence-array was created in signal-on-any mode, | |
b52992c0 CW |
894 | * we should *not* decompose it into its individual fences. However, |
895 | * we don't currently store which mode the fence-array is operating | |
896 | * in. Fortunately, the only user of signal-on-any is private to | |
897 | * amdgpu and we should not see any incoming fence-array from | |
898 | * sync-file being in signal-on-any mode. | |
899 | */ | |
29ef3fa9 CW |
900 | if (dma_fence_is_array(fence)) { |
901 | struct dma_fence_array *array = to_dma_fence_array(fence); | |
902 | ||
903 | child = array->fences; | |
904 | nchild = array->num_fences; | |
905 | GEM_BUG_ON(!nchild); | |
906 | } | |
b52992c0 | 907 | |
29ef3fa9 CW |
908 | do { |
909 | fence = *child++; | |
910 | if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)) | |
911 | continue; | |
b52992c0 | 912 | |
ceae14bd CW |
913 | /* |
914 | * Requests on the same timeline are explicitly ordered, along | |
e61e0f51 | 915 | * with their dependencies, by i915_request_add() which ensures |
ceae14bd CW |
916 | * that requests are submitted in-order through each ring. |
917 | */ | |
e61e0f51 | 918 | if (fence->context == rq->fence.context) |
ceae14bd CW |
919 | continue; |
920 | ||
47979480 | 921 | /* Squash repeated waits to the same timelines */ |
e61e0f51 CW |
922 | if (fence->context != rq->i915->mm.unordered_timeline && |
923 | intel_timeline_sync_is_later(rq->timeline, fence)) | |
47979480 CW |
924 | continue; |
925 | ||
29ef3fa9 | 926 | if (dma_fence_is_i915(fence)) |
e61e0f51 | 927 | ret = i915_request_await_request(rq, to_request(fence)); |
b52992c0 | 928 | else |
e61e0f51 | 929 | ret = i915_sw_fence_await_dma_fence(&rq->submit, fence, |
29ef3fa9 | 930 | I915_FENCE_TIMEOUT, |
2abe2f84 | 931 | I915_FENCE_GFP); |
b52992c0 CW |
932 | if (ret < 0) |
933 | return ret; | |
47979480 CW |
934 | |
935 | /* Record the latest fence used against each timeline */ | |
e61e0f51 CW |
936 | if (fence->context != rq->i915->mm.unordered_timeline) |
937 | intel_timeline_sync_set(rq->timeline, fence); | |
29ef3fa9 | 938 | } while (--nchild); |
b52992c0 CW |
939 | |
940 | return 0; | |
941 | } | |
942 | ||
a2bc4695 | 943 | /** |
e61e0f51 | 944 | * i915_request_await_object - set this request to (async) wait upon a bo |
a2bc4695 CW |
945 | * @to: request we are wishing to use |
946 | * @obj: object which may be in use on another ring. | |
d8802126 | 947 | * @write: whether the wait is on behalf of a writer |
a2bc4695 CW |
948 | * |
949 | * This code is meant to abstract object synchronization with the GPU. | |
950 | * Conceptually we serialise writes between engines inside the GPU. | |
951 | * We only allow one engine to write into a buffer at any time, but | |
952 | * multiple readers. To ensure each has a coherent view of memory, we must: | |
953 | * | |
954 | * - If there is an outstanding write request to the object, the new | |
955 | * request must wait for it to complete (either CPU or in hw, requests | |
956 | * on the same ring will be naturally ordered). | |
957 | * | |
958 | * - If we are a write request (pending_write_domain is set), the new | |
959 | * request must wait for outstanding read requests to complete. | |
960 | * | |
961 | * Returns 0 if successful, else propagates up the lower layer error. | |
962 | */ | |
963 | int | |
e61e0f51 CW |
964 | i915_request_await_object(struct i915_request *to, |
965 | struct drm_i915_gem_object *obj, | |
966 | bool write) | |
a2bc4695 | 967 | { |
d07f0e59 CW |
968 | struct dma_fence *excl; |
969 | int ret = 0; | |
a2bc4695 CW |
970 | |
971 | if (write) { | |
d07f0e59 CW |
972 | struct dma_fence **shared; |
973 | unsigned int count, i; | |
974 | ||
975 | ret = reservation_object_get_fences_rcu(obj->resv, | |
976 | &excl, &count, &shared); | |
977 | if (ret) | |
978 | return ret; | |
979 | ||
980 | for (i = 0; i < count; i++) { | |
e61e0f51 | 981 | ret = i915_request_await_dma_fence(to, shared[i]); |
d07f0e59 CW |
982 | if (ret) |
983 | break; | |
984 | ||
985 | dma_fence_put(shared[i]); | |
986 | } | |
987 | ||
988 | for (; i < count; i++) | |
989 | dma_fence_put(shared[i]); | |
990 | kfree(shared); | |
a2bc4695 | 991 | } else { |
d07f0e59 | 992 | excl = reservation_object_get_excl_rcu(obj->resv); |
a2bc4695 CW |
993 | } |
994 | ||
d07f0e59 CW |
995 | if (excl) { |
996 | if (ret == 0) | |
e61e0f51 | 997 | ret = i915_request_await_dma_fence(to, excl); |
a2bc4695 | 998 | |
d07f0e59 | 999 | dma_fence_put(excl); |
a2bc4695 CW |
1000 | } |
1001 | ||
d07f0e59 | 1002 | return ret; |
a2bc4695 CW |
1003 | } |
1004 | ||
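A hedged usage sketch for the helper above: an execbuf-style caller would typically order its new request behind the object's existing fences (and optionally behind an external fence, e.g. from a sync_file) before emitting any commands that touch the object. example_await_before_emit() is hypothetical and elides reservation locking.

```c
/* Illustrative only: order @rq behind @obj and an optional input fence. */
static int example_await_before_emit(struct i915_request *rq,
				     struct drm_i915_gem_object *obj,
				     struct dma_fence *in_fence,
				     bool will_write)
{
	int err;

	/* Wait for prior readers/writers tracked in the object's reservation. */
	err = i915_request_await_object(rq, obj, will_write);
	if (err)
		return err;

	/* Optionally serialise against an external fence as well. */
	if (in_fence)
		err = i915_request_await_dma_fence(rq, in_fence);

	return err;
}
```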
05235c53 CW |
1005 | /* |
1006 | * NB: This function is not allowed to fail. Doing so would mean that the |
1007 | * request is not being tracked for completion but the work itself is | |
1008 | * going to happen on the hardware. This would be a Bad Thing(tm). | |
1009 | */ | |
e61e0f51 | 1010 | void __i915_request_add(struct i915_request *request, bool flush_caches) |
05235c53 | 1011 | { |
95b2ab56 CW |
1012 | struct intel_engine_cs *engine = request->engine; |
1013 | struct intel_ring *ring = request->ring; | |
73cb9701 | 1014 | struct intel_timeline *timeline = request->timeline; |
e61e0f51 | 1015 | struct i915_request *prev; |
73dec95e | 1016 | u32 *cs; |
caddfe71 | 1017 | int err; |
05235c53 | 1018 | |
d9b13c4d CW |
1019 | GEM_TRACE("%s fence %llx:%d\n", |
1020 | engine->name, request->fence.context, request->fence.seqno); | |
1021 | ||
4c7d62c6 | 1022 | lockdep_assert_held(&request->i915->drm.struct_mutex); |
e61e0f51 | 1023 | trace_i915_request_add(request); |
0f25dff6 | 1024 | |
8ac71d1d CW |
1025 | /* |
1026 | * Make sure that no request gazumped us - if it was allocated after | |
e61e0f51 | 1027 | * our i915_request_alloc() and called __i915_request_add() before |
c781c978 CW |
1028 | * us, the timeline will hold its seqno which is later than ours. |
1029 | */ | |
9b6586ae | 1030 | GEM_BUG_ON(timeline->seqno != request->fence.seqno); |
c781c978 | 1031 | |
05235c53 CW |
1032 | /* |
1033 | * To ensure that this call will not fail, space for its emissions | |
1034 | * should already have been reserved in the ring buffer. Let the ring | |
1035 | * know that it is time to use that space up. | |
1036 | */ | |
05235c53 CW |
1037 | request->reserved_space = 0; |
1038 | ||
1039 | /* | |
1040 | * Emit any outstanding flushes - execbuf can fail to emit the flush | |
1041 | * after having emitted the batchbuffer command. Hence we need to fix | |
1042 | * things up similar to emitting the lazy request. The difference here | |
1043 | * is that the flush _must_ happen before the next request, no matter | |
1044 | * what. | |
1045 | */ | |
1046 | if (flush_caches) { | |
caddfe71 | 1047 | err = engine->emit_flush(request, EMIT_FLUSH); |
c7fe7d25 | 1048 | |
05235c53 | 1049 | /* Not allowed to fail! */ |
caddfe71 | 1050 | WARN(err, "engine->emit_flush() failed: %d!\n", err); |
05235c53 CW |
1051 | } |
1052 | ||
8ac71d1d CW |
1053 | /* |
1054 | * Record the position of the start of the breadcrumb so that | |
05235c53 CW |
1055 | * should we detect the updated seqno part-way through the |
1056 | * GPU processing the request, we never over-estimate the | |
d045446d | 1057 | * position of the ring's HEAD. |
05235c53 | 1058 | */ |
73dec95e TU |
1059 | cs = intel_ring_begin(request, engine->emit_breadcrumb_sz); |
1060 | GEM_BUG_ON(IS_ERR(cs)); | |
1061 | request->postfix = intel_ring_offset(request, cs); | |
05235c53 | 1062 | |
8ac71d1d CW |
1063 | /* |
1064 | * Seal the request and mark it as pending execution. Note that | |
0f25dff6 CW |
1065 | * we may inspect this state, without holding any locks, during |
1066 | * hangcheck. Hence we apply the barrier to ensure that we do not | |
1067 | * see a more recent value in the hws than we are tracking. | |
1068 | */ | |
0a046a0e | 1069 | |
73cb9701 | 1070 | prev = i915_gem_active_raw(&timeline->last_request, |
0a046a0e | 1071 | &request->i915->drm.struct_mutex); |
e61e0f51 | 1072 | if (prev && !i915_request_completed(prev)) { |
0a046a0e CW |
1073 | i915_sw_fence_await_sw_fence(&request->submit, &prev->submit, |
1074 | &request->submitq); | |
52e54209 CW |
1075 | if (engine->schedule) |
1076 | __i915_priotree_add_dependency(&request->priotree, | |
1077 | &prev->priotree, | |
1078 | &request->dep, | |
1079 | 0); | |
1080 | } | |
0a046a0e | 1081 | |
80b204bc | 1082 | spin_lock_irq(&timeline->lock); |
f2d13290 | 1083 | list_add_tail(&request->link, &timeline->requests); |
80b204bc CW |
1084 | spin_unlock_irq(&timeline->lock); |
1085 | ||
9b6586ae | 1086 | GEM_BUG_ON(timeline->seqno != request->fence.seqno); |
73cb9701 | 1087 | i915_gem_active_set(&timeline->last_request, request); |
f2d13290 | 1088 | |
0f25dff6 | 1089 | list_add_tail(&request->ring_link, &ring->request_list); |
f2d13290 | 1090 | request->emitted_jiffies = jiffies; |
0f25dff6 | 1091 | |
8ac71d1d CW |
1092 | /* |
1093 | * Let the backend know a new request has arrived that may need | |
0de9136d CW |
1094 | * to adjust the existing execution schedule due to a high priority |
1095 | * request - i.e. we may want to preempt the current request in order | |
1096 | * to run a high priority dependency chain *before* we can execute this | |
1097 | * request. | |
1098 | * | |
1099 | * This is called before the request is ready to run so that we can | |
1100 | * decide whether to preempt the entire chain so that it is ready to | |
1101 | * run at the earliest possible convenience. | |
1102 | */ | |
47650db0 | 1103 | rcu_read_lock(); |
0de9136d | 1104 | if (engine->schedule) |
9f792eba | 1105 | engine->schedule(request, request->ctx->priority); |
47650db0 | 1106 | rcu_read_unlock(); |
0de9136d | 1107 | |
5590af3e CW |
1108 | local_bh_disable(); |
1109 | i915_sw_fence_commit(&request->submit); | |
1110 | local_bh_enable(); /* Kick the execlists tasklet if just scheduled */ | |
c22b355f CW |
1111 | |
1112 | /* | |
1113 | * In typical scenarios, we do not expect the previous request on | |
1114 | * the timeline to be still tracked by timeline->last_request if it | |
1115 | * has been completed. If the completed request is still here, that | |
1116 | * implies that request retirement is a long way behind submission, | |
1117 | * suggesting that we haven't been retiring frequently enough from | |
1118 | * the combination of retire-before-alloc, waiters and the background | |
1119 | * retirement worker. So if the last request on this timeline was | |
1120 | * already completed, do a catch up pass, flushing the retirement queue | |
1121 | * up to this client. Since we have now moved the heaviest operations | |
1122 | * during retirement onto secondary workers, such as freeing objects | |
1123 | * or contexts, retiring a bunch of requests is mostly list management | |
1124 | * (and cache misses), and so we should not be overly penalizing this | |
1125 | * client by performing excess work, though we may still be performing |
1126 | * work on behalf of others -- but instead we should benefit from | |
1127 | * improved resource management. (Well, that's the theory at least.) | |
1128 | */ | |
e61e0f51 CW |
1129 | if (prev && i915_request_completed(prev)) |
1130 | i915_request_retire_upto(prev); | |
05235c53 CW |
1131 | } |
1132 | ||
1133 | static unsigned long local_clock_us(unsigned int *cpu) | |
1134 | { | |
1135 | unsigned long t; | |
1136 | ||
e61e0f51 CW |
1137 | /* |
1138 | * Cheaply and approximately convert from nanoseconds to microseconds. | |
05235c53 CW |
1139 | * The result and subsequent calculations are also defined in the same |
1140 | * approximate microseconds units. The principal source of timing | |
1141 | * error here is from the simple truncation. | |
1142 | * | |
1143 | * Note that local_clock() is only defined wrt the current CPU; |
1144 | * the comparisons are no longer valid if we switch CPUs. Instead of | |
1145 | * blocking preemption for the entire busywait, we can detect the CPU | |
1146 | * switch and use that as indicator of system load and a reason to | |
1147 | * stop busywaiting, see busywait_stop(). | |
1148 | */ | |
1149 | *cpu = get_cpu(); | |
1150 | t = local_clock() >> 10; | |
1151 | put_cpu(); | |
1152 | ||
1153 | return t; | |
1154 | } | |
1155 | ||
1156 | static bool busywait_stop(unsigned long timeout, unsigned int cpu) | |
1157 | { | |
1158 | unsigned int this_cpu; | |
1159 | ||
1160 | if (time_after(local_clock_us(&this_cpu), timeout)) | |
1161 | return true; | |
1162 | ||
1163 | return this_cpu != cpu; | |
1164 | } | |
1165 | ||
e61e0f51 | 1166 | static bool __i915_spin_request(const struct i915_request *rq, |
b2f2f0fc | 1167 | u32 seqno, int state, unsigned long timeout_us) |
05235c53 | 1168 | { |
e61e0f51 | 1169 | struct intel_engine_cs *engine = rq->engine; |
c33ed067 | 1170 | unsigned int irq, cpu; |
05235c53 | 1171 | |
b2f2f0fc CW |
1172 | GEM_BUG_ON(!seqno); |
1173 | ||
1174 | /* | |
1175 | * Only wait for the request if we know it is likely to complete. | |
1176 | * | |
1177 | * We don't track the timestamps around requests, nor the average | |
1178 | * request length, so we do not have a good indicator that this | |
1179 | * request will complete within the timeout. What we do know is the | |
1180 | * order in which requests are executed by the engine and so we can | |
1181 | * tell if the request has started. If the request hasn't started yet, | |
1182 | * it is a fair assumption that it will not complete within our | |
1183 | * relatively short timeout. | |
1184 | */ | |
1185 | if (!i915_seqno_passed(intel_engine_get_seqno(engine), seqno - 1)) | |
1186 | return false; | |
1187 | ||
e61e0f51 CW |
1188 | /* |
1189 | * When waiting for high frequency requests, e.g. during synchronous | |
05235c53 CW |
1190 | * rendering split between the CPU and GPU, the finite amount of time |
1191 | * required to set up the irq and wait upon it limits the response | |
1192 | * rate. By busywaiting on the request completion for a short while we | |
1193 | * can service the high frequency waits as quickly as possible. However, |
1194 | * if it is a slow request, we want to sleep as quickly as possible. | |
1195 | * The tradeoff between waiting and sleeping is roughly the time it | |
1196 | * takes to sleep on a request, on the order of a microsecond. | |
1197 | */ | |
1198 | ||
c33ed067 | 1199 | irq = atomic_read(&engine->irq_count); |
05235c53 CW |
1200 | timeout_us += local_clock_us(&cpu); |
1201 | do { | |
b2f2f0fc | 1202 | if (i915_seqno_passed(intel_engine_get_seqno(engine), seqno)) |
e61e0f51 | 1203 | return seqno == i915_request_global_seqno(rq); |
05235c53 | 1204 | |
e61e0f51 CW |
1205 | /* |
1206 | * Seqnos are meant to be ordered *before* the interrupt. If |
c33ed067 CW |
1207 | * we see an interrupt without a corresponding seqno advance, |
1208 | * assume we won't see one in the near future but require | |
1209 | * the engine->seqno_barrier() to fixup coherency. | |
1210 | */ | |
1211 | if (atomic_read(&engine->irq_count) != irq) | |
1212 | break; | |
1213 | ||
05235c53 CW |
1214 | if (signal_pending_state(state, current)) |
1215 | break; | |
1216 | ||
1217 | if (busywait_stop(timeout_us, cpu)) | |
1218 | break; | |
1219 | ||
f2f09a4c | 1220 | cpu_relax(); |
05235c53 CW |
1221 | } while (!need_resched()); |
1222 | ||
1223 | return false; | |
1224 | } | |
1225 | ||
e61e0f51 | 1226 | static bool __i915_wait_request_check_and_reset(struct i915_request *request) |
4680816b | 1227 | { |
8c185eca | 1228 | if (likely(!i915_reset_handoff(&request->i915->gpu_error))) |
e0705114 | 1229 | return false; |
4680816b | 1230 | |
e0705114 | 1231 | __set_current_state(TASK_RUNNING); |
ce800754 | 1232 | i915_reset(request->i915); |
e0705114 | 1233 | return true; |
4680816b CW |
1234 | } |
1235 | ||
05235c53 | 1236 | /** |
e532be89 | 1237 | * i915_request_wait - wait until execution of request has finished |
e61e0f51 | 1238 | * @rq: the request to wait upon |
ea746f36 | 1239 | * @flags: how to wait |
e95433c7 CW |
1240 | * @timeout: how long to wait in jiffies |
1241 | * | |
e532be89 | 1242 | * i915_request_wait() waits for the request to be completed, for a |
e95433c7 CW |
1243 | * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an |
1244 | * unbounded wait). | |
05235c53 | 1245 | * |
e95433c7 CW |
1246 | * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED |
1247 | * in via the flags; conversely, if the struct_mutex is not held, the caller |
1248 | * must not specify that the wait is locked. | |
05235c53 | 1249 | * |
e95433c7 CW |
1250 | * Returns the remaining time (in jiffies) if the request completed, which may |
1251 | * be zero or -ETIME if the request is unfinished after the timeout expires. | |
1252 | * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is |
1253 | * pending before the request completes. | |
05235c53 | 1254 | */ |
e61e0f51 | 1255 | long i915_request_wait(struct i915_request *rq, |
e95433c7 CW |
1256 | unsigned int flags, |
1257 | long timeout) | |
05235c53 | 1258 | { |
ea746f36 CW |
1259 | const int state = flags & I915_WAIT_INTERRUPTIBLE ? |
1260 | TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE; | |
e61e0f51 | 1261 | wait_queue_head_t *errq = &rq->i915->gpu_error.wait_queue; |
a49625f9 CW |
1262 | DEFINE_WAIT_FUNC(reset, default_wake_function); |
1263 | DEFINE_WAIT_FUNC(exec, default_wake_function); | |
05235c53 | 1264 | struct intel_wait wait; |
05235c53 CW |
1265 | |
1266 | might_sleep(); | |
22dd3bb9 | 1267 | #if IS_ENABLED(CONFIG_LOCKDEP) |
e95433c7 | 1268 | GEM_BUG_ON(debug_locks && |
e61e0f51 | 1269 | !!lockdep_is_held(&rq->i915->drm.struct_mutex) != |
22dd3bb9 CW |
1270 | !!(flags & I915_WAIT_LOCKED)); |
1271 | #endif | |
e95433c7 | 1272 | GEM_BUG_ON(timeout < 0); |
05235c53 | 1273 | |
e61e0f51 | 1274 | if (i915_request_completed(rq)) |
e95433c7 | 1275 | return timeout; |
05235c53 | 1276 | |
e95433c7 CW |
1277 | if (!timeout) |
1278 | return -ETIME; | |
05235c53 | 1279 | |
e61e0f51 | 1280 | trace_i915_request_wait_begin(rq, flags); |
05235c53 | 1281 | |
e61e0f51 | 1282 | add_wait_queue(&rq->execute, &exec); |
7de53bf7 CW |
1283 | if (flags & I915_WAIT_LOCKED) |
1284 | add_wait_queue(errq, &reset); | |
1285 | ||
e61e0f51 | 1286 | intel_wait_init(&wait, rq); |
754c9fd5 | 1287 | |
d6a2289d | 1288 | restart: |
0f2f61d4 CW |
1289 | do { |
1290 | set_current_state(state); | |
e61e0f51 | 1291 | if (intel_wait_update_request(&wait, rq)) |
0f2f61d4 | 1292 | break; |
541ca6ed | 1293 | |
0f2f61d4 | 1294 | if (flags & I915_WAIT_LOCKED && |
e61e0f51 | 1295 | __i915_wait_request_check_and_reset(rq)) |
0f2f61d4 | 1296 | continue; |
05235c53 | 1297 | |
0f2f61d4 CW |
1298 | if (signal_pending_state(state, current)) { |
1299 | timeout = -ERESTARTSYS; | |
4680816b | 1300 | goto complete; |
0f2f61d4 | 1301 | } |
4680816b | 1302 | |
0f2f61d4 CW |
1303 | if (!timeout) { |
1304 | timeout = -ETIME; | |
1305 | goto complete; | |
1306 | } | |
541ca6ed | 1307 | |
0f2f61d4 CW |
1308 | timeout = io_schedule_timeout(timeout); |
1309 | } while (1); | |
4680816b | 1310 | |
0f2f61d4 | 1311 | GEM_BUG_ON(!intel_wait_has_seqno(&wait)); |
e61e0f51 | 1312 | GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit)); |
4680816b | 1313 | |
437c3087 | 1314 | /* Optimistic short spin before touching IRQs */ |
e61e0f51 | 1315 | if (__i915_spin_request(rq, wait.seqno, state, 5)) |
05235c53 CW |
1316 | goto complete; |
1317 | ||
1318 | set_current_state(state); | |
e61e0f51 CW |
1319 | if (intel_engine_add_wait(rq->engine, &wait)) |
1320 | /* | |
1321 | * In order to check that we haven't missed the interrupt | |
05235c53 CW |
1322 | * as we enabled it, we need to kick ourselves to do a |
1323 | * coherent check on the seqno before we sleep. | |
1324 | */ | |
1325 | goto wakeup; | |
1326 | ||
24f417ec | 1327 | if (flags & I915_WAIT_LOCKED) |
e61e0f51 | 1328 | __i915_wait_request_check_and_reset(rq); |
24f417ec | 1329 | |
05235c53 CW |
1330 | for (;;) { |
1331 | if (signal_pending_state(state, current)) { | |
e95433c7 | 1332 | timeout = -ERESTARTSYS; |
05235c53 CW |
1333 | break; |
1334 | } | |
1335 | ||
e95433c7 CW |
1336 | if (!timeout) { |
1337 | timeout = -ETIME; | |
05235c53 CW |
1338 | break; |
1339 | } | |
1340 | ||
e95433c7 CW |
1341 | timeout = io_schedule_timeout(timeout); |
1342 | ||
754c9fd5 | 1343 | if (intel_wait_complete(&wait) && |
e61e0f51 | 1344 | intel_wait_check_request(&wait, rq)) |
05235c53 CW |
1345 | break; |
1346 | ||
1347 | set_current_state(state); | |
1348 | ||
1349 | wakeup: | |
e61e0f51 CW |
1350 | /* |
1351 | * Carefully check if the request is complete, giving time | |
05235c53 CW |
1352 | * for the seqno to be visible following the interrupt. |
1353 | * We also have to check in case we are kicked by the GPU | |
1354 | * reset in order to drop the struct_mutex. | |
1355 | */ | |
e61e0f51 | 1356 | if (__i915_request_irq_complete(rq)) |
05235c53 CW |
1357 | break; |
1358 | ||
e61e0f51 CW |
1359 | /* |
1360 | * If the GPU is hung, and we hold the lock, reset the GPU | |
221fe799 CW |
1361 | * and then check for completion. On a full reset, the engine's |
1362 | * HW seqno will be advanced past us and we are complete. |
1363 | * If we do a partial reset, we have to wait for the GPU to | |
1364 | * resume and update the breadcrumb. | |
1365 | * | |
1366 | * If we don't hold the mutex, we can just wait for the worker | |
1367 | * to come along and update the breadcrumb (either directly | |
1368 | * itself, or indirectly by recovering the GPU). | |
1369 | */ | |
1370 | if (flags & I915_WAIT_LOCKED && | |
e61e0f51 | 1371 | __i915_wait_request_check_and_reset(rq)) |
221fe799 | 1372 | continue; |
221fe799 | 1373 | |
05235c53 | 1374 | /* Only spin if we know the GPU is processing this request */ |
e61e0f51 | 1375 | if (__i915_spin_request(rq, wait.seqno, state, 2)) |
05235c53 | 1376 | break; |
d6a2289d | 1377 | |
e61e0f51 CW |
1378 | if (!intel_wait_check_request(&wait, rq)) { |
1379 | intel_engine_remove_wait(rq->engine, &wait); | |
d6a2289d CW |
1380 | goto restart; |
1381 | } | |
05235c53 | 1382 | } |
05235c53 | 1383 | |
e61e0f51 | 1384 | intel_engine_remove_wait(rq->engine, &wait); |
05235c53 | 1385 | complete: |
a49625f9 | 1386 | __set_current_state(TASK_RUNNING); |
7de53bf7 CW |
1387 | if (flags & I915_WAIT_LOCKED) |
1388 | remove_wait_queue(errq, &reset); | |
e61e0f51 CW |
1389 | remove_wait_queue(&rq->execute, &exec); |
1390 | trace_i915_request_wait_end(rq); | |
05235c53 | 1391 | |
e95433c7 | 1392 | return timeout; |
05235c53 | 1393 | } |
4b8de8e6 | 1394 | |
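A small usage sketch for the wait API above; example_wait_for_request() is hypothetical, assumes struct_mutex is not held (so I915_WAIT_LOCKED is not passed), and caps the wait at one second.

```c
/* Illustrative only: wait up to a second for @rq, allowing signals. */
static int example_wait_for_request(struct i915_request *rq)
{
	long remaining;

	remaining = i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, HZ);
	if (remaining < 0)
		return remaining; /* -ETIME, or -ERESTARTSYS on a signal */

	return 0; /* completed with 'remaining' jiffies to spare */
}
```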
28176ef4 | 1395 | static void engine_retire_requests(struct intel_engine_cs *engine) |
4b8de8e6 | 1396 | { |
e61e0f51 | 1397 | struct i915_request *request, *next; |
754c9fd5 CW |
1398 | u32 seqno = intel_engine_get_seqno(engine); |
1399 | LIST_HEAD(retire); | |
4b8de8e6 | 1400 | |
754c9fd5 | 1401 | spin_lock_irq(&engine->timeline->lock); |
73cb9701 CW |
1402 | list_for_each_entry_safe(request, next, |
1403 | &engine->timeline->requests, link) { | |
754c9fd5 CW |
1404 | if (!i915_seqno_passed(seqno, request->global_seqno)) |
1405 | break; | |
4b8de8e6 | 1406 | |
754c9fd5 | 1407 | list_move_tail(&request->link, &retire); |
4b8de8e6 | 1408 | } |
754c9fd5 CW |
1409 | spin_unlock_irq(&engine->timeline->lock); |
1410 | ||
1411 | list_for_each_entry_safe(request, next, &retire, link) | |
e61e0f51 | 1412 | i915_request_retire(request); |
4b8de8e6 CW |
1413 | } |
1414 | ||
e61e0f51 | 1415 | void i915_retire_requests(struct drm_i915_private *i915) |
4b8de8e6 CW |
1416 | { |
1417 | struct intel_engine_cs *engine; | |
28176ef4 | 1418 | enum intel_engine_id id; |
4b8de8e6 | 1419 | |
e61e0f51 | 1420 | lockdep_assert_held(&i915->drm.struct_mutex); |
4b8de8e6 | 1421 | |
e61e0f51 | 1422 | if (!i915->gt.active_requests) |
4b8de8e6 CW |
1423 | return; |
1424 | ||
e61e0f51 | 1425 | for_each_engine(engine, i915, id) |
28176ef4 | 1426 | engine_retire_requests(engine); |
4b8de8e6 | 1427 | } |
c835c550 CW |
1428 | |
1429 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) | |
1430 | #include "selftests/mock_request.c" | |
e61e0f51 | 1431 | #include "selftests/i915_request.c" |
c835c550 | 1432 | #endif |