/*
 * Copyright © 2008-2015 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <linux/prefetch.h>
#include <linux/dma-fence-array.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/sched/signal.h>

#include "i915_drv.h"
#include "i915_reset.h"

static const char *i915_fence_get_driver_name(struct dma_fence *fence)
{
	return "i915";
}

static const char *i915_fence_get_timeline_name(struct dma_fence *fence)
{
	/*
	 * The timeline struct (as part of the ppgtt underneath a context)
	 * may be freed when the request is no longer in use by the GPU.
	 * We could extend the life of a context to beyond that of all
	 * fences, possibly keeping the hw resource around indefinitely,
	 * or we just give them a false name. Since
	 * dma_fence_ops.get_timeline_name is a debug feature, the occasional
	 * lie seems justifiable.
	 */
	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
		return "signaled";

	return to_request(fence)->timeline->name;
}

static bool i915_fence_signaled(struct dma_fence *fence)
{
	return i915_request_completed(to_request(fence));
}

static bool i915_fence_enable_signaling(struct dma_fence *fence)
{
	return i915_request_enable_breadcrumb(to_request(fence));
}

static signed long i915_fence_wait(struct dma_fence *fence,
				   bool interruptible,
				   signed long timeout)
{
	return i915_request_wait(to_request(fence), interruptible, timeout);
}

static void i915_fence_release(struct dma_fence *fence)
{
	struct i915_request *rq = to_request(fence);

	/*
	 * The request is put onto a RCU freelist (i.e. the address
	 * is immediately reused), mark the fences as being freed now.
	 * Otherwise the debugobjects for the fences are only marked as
	 * freed when the slab cache itself is freed, and so we would get
	 * caught trying to reuse dead objects.
	 */
	i915_sw_fence_fini(&rq->submit);

	kmem_cache_free(rq->i915->requests, rq);
}

const struct dma_fence_ops i915_fence_ops = {
	.get_driver_name = i915_fence_get_driver_name,
	.get_timeline_name = i915_fence_get_timeline_name,
	.enable_signaling = i915_fence_enable_signaling,
	.signaled = i915_fence_signaled,
	.wait = i915_fence_wait,
	.release = i915_fence_release,
};

static inline void
i915_request_remove_from_client(struct i915_request *request)
{
	struct drm_i915_file_private *file_priv;

	file_priv = request->file_priv;
	if (!file_priv)
		return;

	spin_lock(&file_priv->mm.lock);
	if (request->file_priv) {
		list_del(&request->client_link);
		request->file_priv = NULL;
	}
	spin_unlock(&file_priv->mm.lock);
}

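/*
 * reserve_gt/unreserve_gt count the requests in flight across the whole
 * GT: the first request wakes the device (i915_gem_unpark) and retiring
 * the last allows it to sleep again (i915_gem_park).
 */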
static void reserve_gt(struct drm_i915_private *i915)
{
	if (!i915->gt.active_requests++)
		i915_gem_unpark(i915);
}

static void unreserve_gt(struct drm_i915_private *i915)
{
	GEM_BUG_ON(!i915->gt.active_requests);
	if (!--i915->gt.active_requests)
		i915_gem_park(i915);
}

void i915_gem_retire_noop(struct i915_gem_active *active,
			  struct i915_request *request)
{
	/* Space left intentionally blank */
}

static void advance_ring(struct i915_request *request)
{
	struct intel_ring *ring = request->ring;
	unsigned int tail;

	/*
	 * We know the GPU must have read the request to have
	 * sent us the seqno + interrupt, so use the position
	 * of the tail of the request to update the last known position
	 * of the GPU head.
	 *
	 * Note this requires that we are always called in request
	 * completion order.
	 */
	GEM_BUG_ON(!list_is_first(&request->ring_link, &ring->request_list));
	if (list_is_last(&request->ring_link, &ring->request_list)) {
		/*
		 * We may race here with execlists resubmitting this request
		 * as we retire it. The resubmission will move the ring->tail
		 * forwards (to request->wa_tail). We either read the
		 * current value that was written to hw, or the value that
		 * is just about to be. Either works, if we miss the last two
		 * noops - they are safe to be replayed on a reset.
		 */
		GEM_TRACE("marking %s as inactive\n", ring->timeline->name);
		tail = READ_ONCE(request->tail);
		list_del(&ring->active_link);
	} else {
		tail = request->postfix;
	}
	list_del_init(&request->ring_link);

	ring->head = tail;
}

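/*
 * Free the capture_list: the chain of buffers flagged to be captured in
 * the error state alongside this request.
 */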
static void free_capture_list(struct i915_request *request)
{
	struct i915_capture_list *capture;

	capture = request->capture_list;
	while (capture) {
		struct i915_capture_list *next = capture->next;

		kfree(capture);
		capture = next;
	}
}

static void __retire_engine_request(struct intel_engine_cs *engine,
				    struct i915_request *rq)
{
	GEM_TRACE("%s(%s) fence %llx:%lld, global=%d, current %d:%d\n",
		  __func__, engine->name,
		  rq->fence.context, rq->fence.seqno,
		  rq->global_seqno,
		  hwsp_seqno(rq),
		  intel_engine_get_seqno(engine));

	GEM_BUG_ON(!i915_request_completed(rq));

	local_irq_disable();

	spin_lock(&engine->timeline.lock);
	GEM_BUG_ON(!list_is_first(&rq->link, &engine->timeline.requests));
	list_del_init(&rq->link);
	spin_unlock(&engine->timeline.lock);

	spin_lock(&rq->lock);
	i915_request_mark_complete(rq);
	if (!i915_request_signaled(rq))
		dma_fence_signal_locked(&rq->fence);
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &rq->fence.flags))
		i915_request_cancel_breadcrumb(rq);
	if (rq->waitboost) {
		GEM_BUG_ON(!atomic_read(&rq->i915->gt_pm.rps.num_waiters));
		atomic_dec(&rq->i915->gt_pm.rps.num_waiters);
	}
	spin_unlock(&rq->lock);

	local_irq_enable();

	/*
	 * The backing object for the context is done after switching to the
	 * *next* context. Therefore we cannot retire the previous context until
	 * the next context has already started running. However, since we
	 * cannot take the required locks at i915_request_submit() we
	 * defer the unpinning of the active context to now, retirement of
	 * the subsequent request.
	 */
	if (engine->last_retired_context)
		intel_context_unpin(engine->last_retired_context);
	engine->last_retired_context = rq->hw_context;
}

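/*
 * Retire, in submission order, every request on the engine timeline up to
 * and including @rq. Requests must be retired oldest first, so walk from
 * the head of the list until we reach @rq itself.
 */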
static void __retire_engine_upto(struct intel_engine_cs *engine,
				 struct i915_request *rq)
{
	struct i915_request *tmp;

	if (list_empty(&rq->link))
		return;

	do {
		tmp = list_first_entry(&engine->timeline.requests,
				       typeof(*tmp), link);

		GEM_BUG_ON(tmp->engine != engine);
		__retire_engine_request(engine, tmp);
	} while (tmp != rq);
}

static void i915_request_retire(struct i915_request *request)
{
	struct i915_gem_active *active, *next;

	GEM_TRACE("%s fence %llx:%lld, global=%d, current %d:%d\n",
		  request->engine->name,
		  request->fence.context, request->fence.seqno,
		  request->global_seqno,
		  hwsp_seqno(request),
		  intel_engine_get_seqno(request->engine));

	lockdep_assert_held(&request->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_sw_fence_signaled(&request->submit));
	GEM_BUG_ON(!i915_request_completed(request));

	trace_i915_request_retire(request);

	advance_ring(request);
	free_capture_list(request);

	/*
	 * Walk through the active list, calling retire on each. This allows
	 * objects to track their GPU activity and mark themselves as idle
	 * when their *last* active request is completed (updating state
	 * tracking lists for eviction, active references for GEM, etc).
	 *
	 * As the ->retire() may free the node, we decouple it first and
	 * pass along the auxiliary information (to avoid dereferencing
	 * the node after the callback).
	 */
	list_for_each_entry_safe(active, next, &request->active_list, link) {
		/*
		 * In microbenchmarks or focusing upon time inside the kernel,
		 * we may spend an inordinate amount of time simply handling
		 * the retirement of requests and processing their callbacks.
		 * Of which, this loop itself is particularly hot due to the
		 * cache misses when jumping around the list of i915_gem_active.
		 * So we try to keep this loop as streamlined as possible and
		 * also prefetch the next i915_gem_active to try and hide
		 * the likely cache miss.
		 */
		prefetchw(next);

		INIT_LIST_HEAD(&active->link);
		RCU_INIT_POINTER(active->request, NULL);

		active->retire(active, request);
	}

	i915_request_remove_from_client(request);

	/* Retirement decays the ban score as it is a sign of ctx progress */
	atomic_dec_if_positive(&request->gem_context->ban_score);
	intel_context_unpin(request->hw_context);

	__retire_engine_upto(request->engine, request);

	unreserve_gt(request->i915);

	i915_sched_node_fini(request->i915, &request->sched);
	i915_request_put(request);
}

void i915_request_retire_upto(struct i915_request *rq)
{
	struct intel_ring *ring = rq->ring;
	struct i915_request *tmp;

	GEM_TRACE("%s fence %llx:%lld, global=%d, current %d:%d\n",
		  rq->engine->name,
		  rq->fence.context, rq->fence.seqno,
		  rq->global_seqno,
		  hwsp_seqno(rq),
		  intel_engine_get_seqno(rq->engine));

	lockdep_assert_held(&rq->i915->drm.struct_mutex);
	GEM_BUG_ON(!i915_request_completed(rq));

	if (list_empty(&rq->ring_link))
		return;

	do {
		tmp = list_first_entry(&ring->request_list,
				       typeof(*tmp), ring_link);

		i915_request_retire(tmp);
	} while (tmp != rq);
}

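/*
 * Reserve the next seqno on the request's own timeline. Timelines that
 * emit an initial breadcrumb consume an extra slot for it, hence the
 * advance by 1 + has_initial_breadcrumb.
 */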
static u32 timeline_get_seqno(struct i915_timeline *tl)
{
	return tl->seqno += 1 + tl->has_initial_breadcrumb;
}

static void move_to_timeline(struct i915_request *request,
			     struct i915_timeline *timeline)
{
	GEM_BUG_ON(request->timeline == &request->engine->timeline);
	lockdep_assert_held(&request->engine->timeline.lock);

	spin_lock(&request->timeline->lock);
	list_move_tail(&request->link, &timeline->requests);
	spin_unlock(&request->timeline->lock);
}

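/*
 * A global_seqno of 0 means "not yet submitted", so skip over zero when
 * the engine timeline's seqno wraps.
 */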
static u32 next_global_seqno(struct i915_timeline *tl)
{
	if (!++tl->seqno)
		++tl->seqno;
	return tl->seqno;
}

void __i915_request_submit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	u32 seqno;

	GEM_TRACE("%s fence %llx:%lld -> global=%d, current %d:%d\n",
		  engine->name,
		  request->fence.context, request->fence.seqno,
		  engine->timeline.seqno + 1,
		  hwsp_seqno(request),
		  intel_engine_get_seqno(engine));

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->timeline.lock);

	GEM_BUG_ON(request->global_seqno);

	seqno = next_global_seqno(&engine->timeline);
	GEM_BUG_ON(!seqno);
	GEM_BUG_ON(intel_engine_signaled(engine, seqno));

	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
	GEM_BUG_ON(test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
	set_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
	request->global_seqno = seqno;
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags) &&
	    !i915_request_enable_breadcrumb(request))
		intel_engine_queue_breadcrumbs(engine);
	spin_unlock(&request->lock);

	engine->emit_fini_breadcrumb(request,
				     request->ring->vaddr + request->postfix);

	/* Transfer from per-context onto the global per-engine timeline */
	move_to_timeline(request, &engine->timeline);

	trace_i915_request_execute(request);
}

void i915_request_submit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->timeline.lock, flags);

	__i915_request_submit(request);

	spin_unlock_irqrestore(&engine->timeline.lock, flags);
}

void __i915_request_unsubmit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;

	GEM_TRACE("%s fence %llx:%lld <- global=%d, current %d:%d\n",
		  engine->name,
		  request->fence.context, request->fence.seqno,
		  request->global_seqno,
		  hwsp_seqno(request),
		  intel_engine_get_seqno(engine));

	GEM_BUG_ON(!irqs_disabled());
	lockdep_assert_held(&engine->timeline.lock);

	/*
	 * Only unwind in reverse order, required so that the per-context list
	 * is kept in seqno/ring order.
	 */
	GEM_BUG_ON(!request->global_seqno);
	GEM_BUG_ON(request->global_seqno != engine->timeline.seqno);
	GEM_BUG_ON(intel_engine_has_completed(engine, request->global_seqno));
	engine->timeline.seqno--;

	/* We may be recursing from the signal callback of another i915 fence */
	spin_lock_nested(&request->lock, SINGLE_DEPTH_NESTING);
	request->global_seqno = 0;
	if (test_bit(DMA_FENCE_FLAG_ENABLE_SIGNAL_BIT, &request->fence.flags))
		i915_request_cancel_breadcrumb(request);
	GEM_BUG_ON(!test_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags));
	clear_bit(I915_FENCE_FLAG_ACTIVE, &request->fence.flags);
	spin_unlock(&request->lock);

	/* Transfer back from the global per-engine timeline to per-context */
	move_to_timeline(request, request->timeline);

	/*
	 * We don't need to wake_up any waiters on request->execute, they
	 * will get woken by any other event or us re-adding this request
	 * to the engine timeline (__i915_request_submit()). The waiters
	 * should be quite adept at finding that the request now has a new
	 * global_seqno compared to the one they went to sleep on.
	 */
}

void i915_request_unsubmit(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	unsigned long flags;

	/* Will be called from irq-context when using foreign fences. */
	spin_lock_irqsave(&engine->timeline.lock, flags);

	__i915_request_unsubmit(request);

	spin_unlock_irqrestore(&engine->timeline.lock, flags);
}

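/*
 * Notification callback for the request's submit fence: once all of the
 * request's dependencies have signaled (FENCE_COMPLETE), hand the request
 * to the engine backend; drop the fence's reference on the request when
 * the fence itself is freed (FENCE_FREE).
 */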
static int __i915_sw_fence_call
submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
{
	struct i915_request *request =
		container_of(fence, typeof(*request), submit);

	switch (state) {
	case FENCE_COMPLETE:
		trace_i915_request_submit(request);
		/*
		 * We need to serialize use of the submit_request() callback
		 * with its hotplugging performed during an emergency
		 * i915_gem_set_wedged(). We use the RCU mechanism to mark the
		 * critical section in order to force i915_gem_set_wedged() to
		 * wait until the submit_request() is completed before
		 * proceeding.
		 */
		rcu_read_lock();
		request->engine->submit_request(request);
		rcu_read_unlock();
		break;

	case FENCE_FREE:
		i915_request_put(request);
		break;
	}

	return NOTIFY_DONE;
}

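/*
 * Retire completed requests in order from the front of the ring's list,
 * stopping at the first request still in flight.
 */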
static void ring_retire_requests(struct intel_ring *ring)
{
	struct i915_request *rq, *rn;

	list_for_each_entry_safe(rq, rn, &ring->request_list, ring_link) {
		if (!i915_request_completed(rq))
			break;

		i915_request_retire(rq);
	}
}

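/*
 * Slow path for request allocation: wait for the RCU grace period of the
 * most recent request on this ring to elapse (ratelimiting), retire what
 * has completed, and then retry the allocation with plain GFP_KERNEL.
 */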
static noinline struct i915_request *
i915_request_alloc_slow(struct intel_context *ce)
{
	struct intel_ring *ring = ce->ring;
	struct i915_request *rq;

	if (list_empty(&ring->request_list))
		goto out;

	/* Ratelimit ourselves to prevent oom from malicious clients */
	rq = list_last_entry(&ring->request_list, typeof(*rq), ring_link);
	cond_synchronize_rcu(rq->rcustate);

	/* Retire our old requests in the hope that we free some */
	ring_retire_requests(ring);

out:
	return kmem_cache_alloc(ce->gem_context->i915->requests, GFP_KERNEL);
}

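/*
 * Order this request after the barrier request tracked on its timeline
 * (if any) by waiting on that request's fence before execution.
 */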
static int add_barrier(struct i915_request *rq, struct i915_gem_active *active)
{
	struct i915_request *barrier =
		i915_gem_active_raw(active, &rq->i915->drm.struct_mutex);

	return barrier ? i915_request_await_dma_fence(rq, &barrier->fence) : 0;
}

static int add_timeline_barrier(struct i915_request *rq)
{
	return add_barrier(rq, &rq->timeline->barrier);
}

/**
 * i915_request_alloc - allocate a request structure
 *
 * @engine: engine that we wish to issue the request on.
 * @ctx: context that the request will be associated with.
 *
 * Returns a pointer to the allocated request if successful,
 * or an error code if not.
 */
struct i915_request *
i915_request_alloc(struct intel_engine_cs *engine, struct i915_gem_context *ctx)
{
	struct drm_i915_private *i915 = engine->i915;
	struct i915_request *rq;
	struct intel_context *ce;
	int ret;

	lockdep_assert_held(&i915->drm.struct_mutex);

	/*
	 * Preempt contexts are reserved for exclusive use to inject a
	 * preemption context switch. They are never to be used for any trivial
	 * request!
	 */
	GEM_BUG_ON(ctx == i915->preempt_context);

	/*
	 * ABI: Before userspace accesses the GPU (e.g. execbuffer), report
	 * EIO if the GPU is already wedged.
	 */
	if (i915_terminally_wedged(&i915->gpu_error))
		return ERR_PTR(-EIO);

	/*
	 * Pinning the contexts may generate requests in order to acquire
	 * GGTT space, so do this first before we reserve a seqno for
	 * ourselves.
	 */
	ce = intel_context_pin(ctx, engine);
	if (IS_ERR(ce))
		return ERR_CAST(ce);

	reserve_gt(i915);

	/* Move our oldest request to the slab-cache (if not in use!) */
	rq = list_first_entry(&ce->ring->request_list, typeof(*rq), ring_link);
	if (!list_is_last(&rq->ring_link, &ce->ring->request_list) &&
	    i915_request_completed(rq))
		i915_request_retire(rq);

	/*
	 * Beware: Dragons be flying overhead.
	 *
	 * We use RCU to look up requests in flight. The lookups may
	 * race with the request being allocated from the slab freelist.
	 * That is, the request we are writing to here may be in the process
	 * of being read by __i915_gem_active_get_rcu(). As such,
	 * we have to be very careful when overwriting the contents. During
	 * the RCU lookup, we chase the request->engine pointer,
	 * read the request->global_seqno and increment the reference count.
	 *
	 * The reference count is incremented atomically. If it is zero,
	 * the lookup knows the request is unallocated and complete. Otherwise,
	 * it is either still in use, or has been reallocated and reset
	 * with dma_fence_init(). This increment is safe for release as we
	 * check that the request we have a reference to matches the active
	 * request.
	 *
	 * Before we increment the refcount, we chase the request->engine
	 * pointer. We must not call kmem_cache_zalloc() or else we set
	 * that pointer to NULL and cause a crash during the lookup. If
	 * we see the request is completed (based on the value of the
	 * old engine and seqno), the lookup is complete and reports NULL.
	 * If we decide the request is not completed (new engine or seqno),
	 * then we grab a reference and double check that it is still the
	 * active request - which it won't be, and restart the lookup.
	 *
	 * Do not use kmem_cache_zalloc() here!
	 */
	rq = kmem_cache_alloc(i915->requests,
			      GFP_KERNEL | __GFP_RETRY_MAYFAIL | __GFP_NOWARN);
	if (unlikely(!rq)) {
		rq = i915_request_alloc_slow(ce);
		if (!rq) {
			ret = -ENOMEM;
			goto err_unreserve;
		}
	}

	rq->rcustate = get_state_synchronize_rcu();

	INIT_LIST_HEAD(&rq->active_list);
	rq->i915 = i915;
	rq->engine = engine;
	rq->gem_context = ctx;
	rq->hw_context = ce;
	rq->ring = ce->ring;
	rq->timeline = ce->ring->timeline;
	GEM_BUG_ON(rq->timeline == &engine->timeline);
	rq->hwsp_seqno = rq->timeline->hwsp_seqno;

	spin_lock_init(&rq->lock);
	dma_fence_init(&rq->fence,
		       &i915_fence_ops,
		       &rq->lock,
		       rq->timeline->fence_context,
		       timeline_get_seqno(rq->timeline));

	/* We bump the ref for the fence chain */
	i915_sw_fence_init(&i915_request_get(rq)->submit, submit_notify);

	i915_sched_node_init(&rq->sched);

	/* No zalloc, must clear what we need by hand */
	rq->global_seqno = 0;
	rq->file_priv = NULL;
	rq->batch = NULL;
	rq->capture_list = NULL;
	rq->waitboost = false;

	/*
	 * Reserve space in the ring buffer for all the commands required to
	 * eventually emit this request. This is to guarantee that the
	 * i915_request_add() call can't fail. Note that the reserve may need
	 * to be redone if the request is not actually submitted straight
	 * away, e.g. because a GPU scheduler has deferred it.
	 *
	 * Note that due to how we add reserved_space to intel_ring_begin()
	 * we need to double our request to ensure that if we need to wrap
	 * around inside i915_request_add() there is sufficient space at
	 * the beginning of the ring as well.
	 */
	rq->reserved_space = 2 * engine->emit_fini_breadcrumb_dw * sizeof(u32);

	/*
	 * Record the position of the start of the request so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the head.
	 */
	rq->head = rq->ring->emit;

	ret = add_timeline_barrier(rq);
	if (ret)
		goto err_unwind;

	ret = engine->request_alloc(rq);
	if (ret)
		goto err_unwind;

	/* Keep a second pin for the dual retirement along engine and ring */
	__intel_context_pin(ce);

	rq->infix = rq->ring->emit; /* end of header; start of user payload */

	/* Check that we didn't interrupt ourselves with a new request */
	GEM_BUG_ON(rq->timeline->seqno != rq->fence.seqno);
	return rq;

err_unwind:
	ce->ring->emit = rq->head;

	/* Make sure we didn't add ourselves to external state before freeing */
	GEM_BUG_ON(!list_empty(&rq->active_list));
	GEM_BUG_ON(!list_empty(&rq->sched.signalers_list));
	GEM_BUG_ON(!list_empty(&rq->sched.waiters_list));

	kmem_cache_free(i915->requests, rq);
err_unreserve:
	unreserve_gt(i915);
	intel_context_unpin(ce);
	return ERR_PTR(ret);
}

static int
i915_request_await_request(struct i915_request *to, struct i915_request *from)
{
	int ret;

	GEM_BUG_ON(to == from);
	GEM_BUG_ON(to->timeline == from->timeline);

	if (i915_request_completed(from))
		return 0;

	if (to->engine->schedule) {
		ret = i915_sched_node_add_dependency(to->i915,
						     &to->sched,
						     &from->sched);
		if (ret < 0)
			return ret;
	}

	if (to->engine == from->engine) {
		ret = i915_sw_fence_await_sw_fence_gfp(&to->submit,
						       &from->submit,
						       I915_FENCE_GFP);
	} else {
		ret = i915_sw_fence_await_dma_fence(&to->submit,
						    &from->fence, 0,
						    I915_FENCE_GFP);
	}

	return ret < 0 ? ret : 0;
}

int
i915_request_await_dma_fence(struct i915_request *rq, struct dma_fence *fence)
{
	struct dma_fence **child = &fence;
	unsigned int nchild = 1;
	int ret;

	/*
	 * Note that if the fence-array was created in signal-on-any mode,
	 * we should *not* decompose it into its individual fences. However,
	 * we don't currently store which mode the fence-array is operating
	 * in. Fortunately, the only user of signal-on-any is private to
	 * amdgpu and we should not see any incoming fence-array from
	 * sync-file being in signal-on-any mode.
	 */
	if (dma_fence_is_array(fence)) {
		struct dma_fence_array *array = to_dma_fence_array(fence);

		child = array->fences;
		nchild = array->num_fences;
		GEM_BUG_ON(!nchild);
	}

	do {
		fence = *child++;
		if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags))
			continue;

		/*
		 * Requests on the same timeline are explicitly ordered, along
		 * with their dependencies, by i915_request_add() which ensures
		 * that requests are submitted in-order through each ring.
		 */
		if (fence->context == rq->fence.context)
			continue;

		/* Squash repeated waits to the same timelines */
		if (fence->context != rq->i915->mm.unordered_timeline &&
		    i915_timeline_sync_is_later(rq->timeline, fence))
			continue;

		if (dma_fence_is_i915(fence))
			ret = i915_request_await_request(rq, to_request(fence));
		else
			ret = i915_sw_fence_await_dma_fence(&rq->submit, fence,
							    I915_FENCE_TIMEOUT,
							    I915_FENCE_GFP);
		if (ret < 0)
			return ret;

		/* Record the latest fence used against each timeline */
		if (fence->context != rq->i915->mm.unordered_timeline)
			i915_timeline_sync_set(rq->timeline, fence);
	} while (--nchild);

	return 0;
}

/**
 * i915_request_await_object - set this request to (async) wait upon a bo
 * @to: request we are wishing to use
 * @obj: object which may be in use on another ring.
 * @write: whether the wait is on behalf of a writer
 *
 * This code is meant to abstract object synchronization with the GPU.
 * Conceptually we serialise writes between engines inside the GPU.
 * We only allow one engine to write into a buffer at any time, but
 * multiple readers. To ensure each has a coherent view of memory, we must:
 *
 * - If there is an outstanding write request to the object, the new
 *   request must wait for it to complete (either CPU or in hw, requests
 *   on the same ring will be naturally ordered).
 *
 * - If we are a write request (pending_write_domain is set), the new
 *   request must wait for outstanding read requests to complete.
 *
 * Returns 0 if successful, else propagates up the lower layer error.
 */
int
i915_request_await_object(struct i915_request *to,
			  struct drm_i915_gem_object *obj,
			  bool write)
{
	struct dma_fence *excl;
	int ret = 0;

	if (write) {
		struct dma_fence **shared;
		unsigned int count, i;

		ret = reservation_object_get_fences_rcu(obj->resv,
							&excl, &count, &shared);
		if (ret)
			return ret;

		for (i = 0; i < count; i++) {
			ret = i915_request_await_dma_fence(to, shared[i]);
			if (ret)
				break;

			dma_fence_put(shared[i]);
		}

		for (; i < count; i++)
			dma_fence_put(shared[i]);
		kfree(shared);
	} else {
		excl = reservation_object_get_excl_rcu(obj->resv);
	}

	if (excl) {
		if (ret == 0)
			ret = i915_request_await_dma_fence(to, excl);

		dma_fence_put(excl);
	}

	return ret;
}

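/*
 * Cancel the payload of an errored request: record @error on the fence and
 * overwrite the user commands between infix and postfix with zeros
 * (MI_NOOP), keeping the final breadcrumb so the fence still signals.
 */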
void i915_request_skip(struct i915_request *rq, int error)
{
	void *vaddr = rq->ring->vaddr;
	u32 head;

	GEM_BUG_ON(!IS_ERR_VALUE((long)error));
	dma_fence_set_error(&rq->fence, error);

	/*
	 * As this request likely depends on state from the lost
	 * context, clear out all the user operations leaving the
	 * breadcrumb at the end (so we get the fence notifications).
	 */
	head = rq->infix;
	if (rq->postfix < head) {
		memset(vaddr + head, 0, rq->ring->size - head);
		head = 0;
	}
	memset(vaddr + head, 0, rq->postfix - head);
}

/*
 * NB: This function is not allowed to fail. Doing so would mean the
 * request is not being tracked for completion but the work itself is
 * going to happen on the hardware. This would be a Bad Thing(tm).
 */
void i915_request_add(struct i915_request *request)
{
	struct intel_engine_cs *engine = request->engine;
	struct i915_timeline *timeline = request->timeline;
	struct intel_ring *ring = request->ring;
	struct i915_request *prev;
	u32 *cs;

	GEM_TRACE("%s fence %llx:%lld\n",
		  engine->name, request->fence.context, request->fence.seqno);

	lockdep_assert_held(&request->i915->drm.struct_mutex);
	trace_i915_request_add(request);

	/*
	 * Make sure that no request gazumped us - if it was allocated after
	 * our i915_request_alloc() and called __i915_request_add() before
	 * us, the timeline will hold its seqno which is later than ours.
	 */
	GEM_BUG_ON(timeline->seqno != request->fence.seqno);

	/*
	 * To ensure that this call will not fail, space for its emissions
	 * should already have been reserved in the ring buffer. Let the ring
	 * know that it is time to use that space up.
	 */
	GEM_BUG_ON(request->reserved_space > request->ring->space);
	request->reserved_space = 0;

	/*
	 * Record the position of the start of the breadcrumb so that
	 * should we detect the updated seqno part-way through the
	 * GPU processing the request, we never over-estimate the
	 * position of the ring's HEAD.
	 */
	cs = intel_ring_begin(request, engine->emit_fini_breadcrumb_dw);
	GEM_BUG_ON(IS_ERR(cs));
	request->postfix = intel_ring_offset(request, cs);

	/*
	 * Seal the request and mark it as pending execution. Note that
	 * we may inspect this state, without holding any locks, during
	 * hangcheck. Hence we apply the barrier to ensure that we do not
	 * see a more recent value in the hws than we are tracking.
	 */

	prev = i915_gem_active_raw(&timeline->last_request,
				   &request->i915->drm.struct_mutex);
	if (prev && !i915_request_completed(prev)) {
		i915_sw_fence_await_sw_fence(&request->submit, &prev->submit,
					     &request->submitq);
		if (engine->schedule)
			__i915_sched_node_add_dependency(&request->sched,
							 &prev->sched,
							 &request->dep,
							 0);
	}

	spin_lock_irq(&timeline->lock);
	list_add_tail(&request->link, &timeline->requests);
	spin_unlock_irq(&timeline->lock);

	GEM_BUG_ON(timeline->seqno != request->fence.seqno);
	i915_gem_active_set(&timeline->last_request, request);

	list_add_tail(&request->ring_link, &ring->request_list);
	if (list_is_first(&request->ring_link, &ring->request_list)) {
		GEM_TRACE("marking %s as active\n", ring->timeline->name);
		list_add(&ring->active_link, &request->i915->gt.active_rings);
	}
	request->emitted_jiffies = jiffies;

	/*
	 * Let the backend know a new request has arrived that may need
	 * to adjust the existing execution schedule due to a high priority
	 * request - i.e. we may want to preempt the current request in order
	 * to run a high priority dependency chain *before* we can execute this
	 * request.
	 *
	 * This is called before the request is ready to run so that we can
	 * decide whether to preempt the entire chain so that it is ready to
	 * run at the earliest possible convenience.
	 */
	local_bh_disable();
	rcu_read_lock(); /* RCU serialisation for set-wedged protection */
	if (engine->schedule) {
		struct i915_sched_attr attr = request->gem_context->sched;

		/*
		 * Boost priorities to new clients (new request flows).
		 *
		 * Allow interactive/synchronous clients to jump ahead of
		 * the bulk clients. (FQ_CODEL)
		 */
		if (list_empty(&request->sched.signalers_list))
			attr.priority |= I915_PRIORITY_NEWCLIENT;

		engine->schedule(request, &attr);
	}
	rcu_read_unlock();
	i915_sw_fence_commit(&request->submit);
	local_bh_enable(); /* Kick the execlists tasklet if just scheduled */

	/*
	 * In typical scenarios, we do not expect the previous request on
	 * the timeline to be still tracked by timeline->last_request if it
	 * has been completed. If the completed request is still here, that
	 * implies that request retirement is a long way behind submission,
	 * suggesting that we haven't been retiring frequently enough from
	 * the combination of retire-before-alloc, waiters and the background
	 * retirement worker. So if the last request on this timeline was
	 * already completed, do a catch up pass, flushing the retirement queue
	 * up to this client. Since we have now moved the heaviest operations
	 * during retirement onto secondary workers, such as freeing objects
	 * or contexts, retiring a bunch of requests is mostly list management
	 * (and cache misses), and so we should not be overly penalizing this
	 * client by performing excess work, though we may still be performing
	 * work on behalf of others -- but instead we should benefit from
	 * improved resource management. (Well, that's the theory at least.)
	 */
	if (prev && i915_request_completed(prev))
		i915_request_retire_upto(prev);
}

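/*
 * Typical request flow, sketched for reference (simplified; error
 * handling and locking omitted):
 *
 *	rq = i915_request_alloc(engine, ctx);
 *	cs = intel_ring_begin(rq, 4);
 *	... emit commands into cs ...
 *	intel_ring_advance(rq, cs);
 *	i915_request_add(rq);
 *	i915_request_wait(rq, I915_WAIT_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
 */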
static unsigned long local_clock_us(unsigned int *cpu)
{
	unsigned long t;

	/*
	 * Cheaply and approximately convert from nanoseconds to microseconds.
	 * The result and subsequent calculations are also defined in the same
	 * approximate microseconds units. The principal source of timing
	 * error here is from the simple truncation.
	 *
	 * Note that local_clock() is only defined wrt the current CPU;
	 * the comparisons are no longer valid if we switch CPUs. Instead of
	 * blocking preemption for the entire busywait, we can detect the CPU
	 * switch and use that as indicator of system load and a reason to
	 * stop busywaiting, see busywait_stop().
	 */
	*cpu = get_cpu();
	t = local_clock() >> 10;
	put_cpu();

	return t;
}

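/*
 * Give up busywaiting either once the timeout has expired or if we have
 * been migrated to a different CPU, where the local_clock() comparison is
 * no longer meaningful (see local_clock_us()).
 */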
static bool busywait_stop(unsigned long timeout, unsigned int cpu)
{
	unsigned int this_cpu;

	if (time_after(local_clock_us(&this_cpu), timeout))
		return true;

	return this_cpu != cpu;
}

static bool __i915_spin_request(const struct i915_request * const rq,
				int state, unsigned long timeout_us)
{
	unsigned int cpu;

	/*
	 * Only wait for the request if we know it is likely to complete.
	 *
	 * We don't track the timestamps around requests, nor the average
	 * request length, so we do not have a good indicator that this
	 * request will complete within the timeout. What we do know is the
	 * order in which requests are executed by the context and so we can
	 * tell if the request has been started. If the request is not even
	 * running yet, it is a fair assumption that it will not complete
	 * within our relatively short timeout.
	 */
	if (!i915_request_is_running(rq))
		return false;

	/*
	 * When waiting for high frequency requests, e.g. during synchronous
	 * rendering split between the CPU and GPU, the finite amount of time
	 * required to set up the irq and wait upon it limits the response
	 * rate. By busywaiting on the request completion for a short while we
	 * can service the high frequency waits as quickly as possible. However,
	 * if it is a slow request, we want to sleep as quickly as possible.
	 * The tradeoff between waiting and sleeping is roughly the time it
	 * takes to sleep on a request, on the order of a microsecond.
	 */

	timeout_us += local_clock_us(&cpu);
	do {
		if (i915_request_completed(rq))
			return true;

		if (signal_pending_state(state, current))
			break;

		if (busywait_stop(timeout_us, cpu))
			break;

		cpu_relax();
	} while (!need_resched());

	return false;
}

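/*
 * A dma-fence callback that simply wakes the task waiting on the request
 * once the fence is signaled.
 */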
struct request_wait {
	struct dma_fence_cb cb;
	struct task_struct *tsk;
};

static void request_wait_wake(struct dma_fence *fence, struct dma_fence_cb *cb)
{
	struct request_wait *wait = container_of(cb, typeof(*wait), cb);

	wake_up_process(wait->tsk);
}

/**
 * i915_request_wait - wait until execution of request has finished
 * @rq: the request to wait upon
 * @flags: how to wait
 * @timeout: how long to wait in jiffies
 *
 * i915_request_wait() waits for the request to be completed, for a
 * maximum of @timeout jiffies (with MAX_SCHEDULE_TIMEOUT implying an
 * unbounded wait).
 *
 * If the caller holds the struct_mutex, the caller must pass I915_WAIT_LOCKED
 * in via the flags, and vice versa if the struct_mutex is not held, the caller
 * must not specify that the wait is locked.
 *
 * Returns the remaining time (in jiffies) if the request completed, which may
 * be zero or -ETIME if the request is unfinished after the timeout expires.
 * May return -EINTR if called with I915_WAIT_INTERRUPTIBLE and a signal is
 * pending before the request completes.
 */
long i915_request_wait(struct i915_request *rq,
		       unsigned int flags,
		       long timeout)
{
	const int state = flags & I915_WAIT_INTERRUPTIBLE ?
		TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE;
	struct request_wait wait;

	might_sleep();
	GEM_BUG_ON(timeout < 0);

	if (i915_request_completed(rq))
		return timeout;

	if (!timeout)
		return -ETIME;

	trace_i915_request_wait_begin(rq, flags);

	/* Optimistic short spin before touching IRQs */
	if (__i915_spin_request(rq, state, 5))
		goto out;

	if (flags & I915_WAIT_PRIORITY)
		i915_schedule_bump_priority(rq, I915_PRIORITY_WAIT);

	wait.tsk = current;
	if (dma_fence_add_callback(&rq->fence, &wait.cb, request_wait_wake))
		goto out;

	for (;;) {
		set_current_state(state);

		if (i915_request_completed(rq))
			break;

		if (signal_pending_state(state, current)) {
			timeout = -ERESTARTSYS;
			break;
		}

		if (!timeout) {
			timeout = -ETIME;
			break;
		}

		timeout = io_schedule_timeout(timeout);
	}
	__set_current_state(TASK_RUNNING);

	dma_fence_remove_callback(&rq->fence, &wait.cb);

out:
	trace_i915_request_wait_end(rq);
	return timeout;
}

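/*
 * Retire completed requests across all active rings. Called with
 * struct_mutex held; a no-op while no requests are in flight.
 */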
void i915_retire_requests(struct drm_i915_private *i915)
{
	struct intel_ring *ring, *tmp;

	lockdep_assert_held(&i915->drm.struct_mutex);

	if (!i915->gt.active_requests)
		return;

	list_for_each_entry_safe(ring, tmp, &i915->gt.active_rings, active_link)
		ring_retire_requests(ring);
}

#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
#include "selftests/mock_request.c"
#include "selftests/i915_request.c"
#endif