Commit | Line | Data |
---|---|---|
70a2b431 | 1 | // SPDX-License-Identifier: MIT |
b20385f1 OM |
2 | /* |
3 | * Copyright © 2014 Intel Corporation | |
b20385f1 OM |
4 | */ |
5 | ||
73e4d07f OM |
6 | /** |
7 | * DOC: Logical Rings, Logical Ring Contexts and Execlists | |
8 | * | |
9 | * Motivation: | |
b20385f1 OM |
10 | * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts". |
11 | * These expanded contexts enable a number of new abilities, especially | |
12 | * "Execlists" (also implemented in this file). | |
13 | * | |
73e4d07f OM |
14 | * One of the main differences with the legacy HW contexts is that logical |
15 | * ring contexts incorporate many more things to the context's state, like | |
16 | * PDPs or ringbuffer control registers: | |
17 | * | |
18 | * The reason why PDPs are included in the context is straightforward: as | |
19 | * PPGTTs (per-process GTTs) are actually per-context, having the PDPs | |
20 | * contained there means you don't need to do a ppgtt->switch_mm yourself, | |
21 | * instead, the GPU will do it for you on the context switch. | |
22 | * | |
23 | * But, what about the ringbuffer control registers (head, tail, etc.)? | |
24 | * Shouldn't we just need a set of those per engine command streamer? This is | |
25 | * where the name "Logical Rings" starts to make sense: by virtualizing the | |
26 | * rings, the engine cs shifts to a new "ring buffer" with every context | |
27 | * switch. When you want to submit a workload to the GPU you: A) choose your | |
28 | * context, B) find its appropriate virtualized ring, C) write commands to it | |
29 | * and then, finally, D) tell the GPU to switch to that context. | |
30 | * | |
31 | * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch | |
32 | * to a context is via a context execution list, ergo "Execlists". | |
33 | * | |
34 | * LRC implementation: | |
35 | * Regarding the creation of contexts, we have: | |
36 | * | |
37 | * - One global default context. | |
38 | * - One local default context for each opened fd. | |
39 | * - One local extra context for each context create ioctl call. | |
40 | * | |
41 | * Now that ringbuffers belong per-context (and not per-engine, like before) | |
42 | * and that contexts are uniquely tied to a given engine (and not reusable, | |
43 | * like before) we need: | |
44 | * | |
45 | * - One ringbuffer per-engine inside each context. | |
46 | * - One backing object per-engine inside each context. | |
47 | * | |
48 | * The global default context starts its life with these new objects fully | |
49 | * allocated and populated. The local default context for each opened fd is | |
50 | * more complex, because we don't know at creation time which engine is going | |
51 | * to use them. To handle this, we have implemented a deferred creation of LR | |
52 | * contexts: | |
53 | * | |
54 | * The local context starts its life as a hollow or blank holder that only | |
55 | * gets populated for a given engine once we receive an execbuffer. If later | |
56 | * on we receive another execbuffer ioctl for the same context but a different | |
57 | * engine, we allocate/populate a new ringbuffer and context backing object and | |
58 | * so on. | |
59 | * | |
60 | * Finally, regarding local contexts created using the ioctl call: as they are | |
61 | * only allowed with the render ring, we can allocate & populate them right | |
62 | * away (no need to defer anything, at least for now). | |
63 | * | |
64 | * Execlists implementation: | |
b20385f1 OM |
65 | * Execlists are the new method by which, on gen8+ hardware, workloads are |
66 | * submitted for execution (as opposed to the legacy, ringbuffer-based, method). | |
73e4d07f OM |
67 | * This method works as follows: |
68 | * | |
69 | * When a request is committed, its commands (the BB start and any leading or | |
70 | * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer | |
71 | * for the appropriate context. The tail pointer in the hardware context is not | |
72 | * updated at this time, but instead, kept by the driver in the ringbuffer | |
73 | * structure. A structure representing this request is added to a request queue | |
74 | * for the appropriate engine: this structure contains a copy of the context's | |
75 | * tail after the request was written to the ring buffer and a pointer to the | |
76 | * context itself. | |
77 | * | |
78 | * If the engine's request queue was empty before the request was added, the | |
79 | * queue is processed immediately. Otherwise the queue will be processed during | |
80 | * a context switch interrupt. In any case, elements on the queue will get sent | |
81 | * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a | |
82 | * globally unique 20-bit submission ID. | |
83 | * | |
84 | * When execution of a request completes, the GPU updates the context status | |
85 | * buffer with a context complete event and generates a context switch interrupt. | |
86 | * During the interrupt handling, the driver examines the events in the buffer: | |
87 | * for each context complete event, if the announced ID matches that on the head | |
88 | * of the request queue, then that request is retired and removed from the queue. | |
89 | * | |
90 | * After processing, if any requests were retired and the queue is not empty | |
91 | * then a new execution list can be submitted. The two requests at the front of | |
92 | * the queue are next to be submitted but since a context may not occur twice in | |
93 | * an execution list, if subsequent requests have the same ID as the first then | |
94 | * the two requests must be combined. This is done simply by discarding requests | |
95 | * at the head of the queue until either only one request is left (in which case | |
96 | * we use a NULL second context) or the first two requests have unique IDs; a standalone sketch of this pairing rule follows this comment block. | |
97 | * | |
98 | * By always executing the first two requests in the queue the driver ensures | |
99 | * that the GPU is kept as busy as possible. In the case where a single context | |
100 | * completes but a second context is still executing, the request for this second | |
101 | * context will be at the head of the queue when we remove the first one. This | |
102 | * request will then be resubmitted along with a new request for a different context, | |
103 | * which will cause the hardware to continue executing the second request and queue | |
104 | * the new request (the GPU detects the condition of a context getting preempted | |
105 | * with the same context and optimizes the context switch flow by not doing | |
106 | * preemption, but just sampling the new tail pointer). | |
107 | * | |
b20385f1 | 108 | */ |
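/*
 * A minimal, self-contained userspace sketch (illustration only, not part of
 * this driver) of the ELSP pairing rule described in the DOC comment above:
 * requests are taken from the head of the queue, consecutive requests that
 * share a context are folded into a single slot (only the final tail
 * matters), and the second slot holds a request from a different context or
 * NULL. The demo_request type and the plain array standing in for the queue
 * are simplified assumptions, not the driver's real structures.
 */
#include <stddef.h>
#include <stdio.h>

struct demo_request {
	unsigned int ctx_id;	/* stand-in for the submission ID */
	unsigned int tail;	/* ring tail after this request's commands */
};

static size_t build_elsp_pair(struct demo_request *queue, size_t count,
			      struct demo_request *elsp[2])
{
	size_t i = 0;

	elsp[0] = NULL;
	elsp[1] = NULL;
	if (!count)
		return 0;

	/* Slot 0: the last queued request of the leading context. */
	elsp[0] = &queue[i++];
	while (i < count && queue[i].ctx_id == elsp[0]->ctx_id)
		elsp[0] = &queue[i++];	/* same context: merge by taking the later tail */

	/* Slot 1: the first request of a different context, if any. */
	if (i < count)
		elsp[1] = &queue[i++];

	return i;	/* number of requests consumed from the queue */
}

int main(void)
{
	struct demo_request queue[] = {
		{ .ctx_id = 1, .tail = 0x100 },
		{ .ctx_id = 1, .tail = 0x180 },	/* merged into ELSP[0] */
		{ .ctx_id = 2, .tail = 0x040 },
	};
	struct demo_request *elsp[2];
	size_t used = build_elsp_pair(queue, 3, elsp);

	printf("consumed %zu: ELSP[0] ctx %u tail %#x, ELSP[1] %s\n",
	       used, elsp[0]->ctx_id, elsp[0]->tail,
	       elsp[1] ? "ctx 2" : "NULL");
	return 0;
}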
27af5eea | 109 | #include <linux/interrupt.h> |
01fabda8 | 110 | #include <linux/string_helpers.h> |
b20385f1 | 111 | |
b20385f1 | 112 | #include "i915_drv.h" |
801543b2 | 113 | #include "i915_reg.h" |
a09d9a80 | 114 | #include "i915_trace.h" |
bc4237ec | 115 | #include "i915_vgpu.h" |
d0d829e5 | 116 | #include "gen8_engine_cs.h" |
b3786b29 | 117 | #include "intel_breadcrumbs.h" |
e6ba7648 | 118 | #include "intel_context.h" |
62eaf0ae | 119 | #include "intel_engine_heartbeat.h" |
c34c5bca | 120 | #include "intel_engine_pm.h" |
202b1f4c | 121 | #include "intel_engine_regs.h" |
4fb05a39 | 122 | #include "intel_engine_stats.h" |
70a2b431 | 123 | #include "intel_execlists_submission.h" |
2006058e | 124 | #include "intel_gt.h" |
0669a6e1 | 125 | #include "intel_gt_irq.h" |
c7302f20 | 126 | #include "intel_gt_pm.h" |
0d6419e9 | 127 | #include "intel_gt_regs.h" |
4f88f874 | 128 | #include "intel_gt_requests.h" |
a0d3fdb6 | 129 | #include "intel_lrc.h" |
578f1ac6 | 130 | #include "intel_lrc_reg.h" |
3bbaba0c | 131 | #include "intel_mocs.h" |
112ed2d3 | 132 | #include "intel_reset.h" |
2871ea85 | 133 | #include "intel_ring.h" |
7d3c425f | 134 | #include "intel_workarounds.h" |
be1cb55a | 135 | #include "shmem_utils.h" |
127f1003 | 136 | |
e981e7b1 TD |
137 | #define RING_EXECLIST_QFULL (1 << 0x2) |
138 | #define RING_EXECLIST1_VALID (1 << 0x3) | |
139 | #define RING_EXECLIST0_VALID (1 << 0x4) | |
140 | #define RING_EXECLIST_ACTIVE_STATUS (3 << 0xE) | |
141 | #define RING_EXECLIST1_ACTIVE (1 << 0x11) | |
142 | #define RING_EXECLIST0_ACTIVE (1 << 0x12) | |
143 | ||
144 | #define GEN8_CTX_STATUS_IDLE_ACTIVE (1 << 0) | |
145 | #define GEN8_CTX_STATUS_PREEMPTED (1 << 1) | |
146 | #define GEN8_CTX_STATUS_ELEMENT_SWITCH (1 << 2) | |
147 | #define GEN8_CTX_STATUS_ACTIVE_IDLE (1 << 3) | |
148 | #define GEN8_CTX_STATUS_COMPLETE (1 << 4) | |
149 | #define GEN8_CTX_STATUS_LITE_RESTORE (1 << 15) | |
8670d6f9 | 150 | |
70c2a24d | 151 | #define GEN8_CTX_STATUS_COMPLETED_MASK \ |
d8747afb | 152 | (GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED) |
70c2a24d | 153 | |
f4785682 DCS |
154 | #define GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE (0x1) /* lower csb dword */ |
155 | #define GEN12_CTX_SWITCH_DETAIL(csb_dw) ((csb_dw) & 0xF) /* upper csb dword */ | |
156 | #define GEN12_CSB_SW_CTX_ID_MASK GENMASK(25, 15) | |
157 | #define GEN12_IDLE_CTX_ID 0x7FF | |
158 | #define GEN12_CSB_CTX_VALID(csb_dw) \ | |
159 | (FIELD_GET(GEN12_CSB_SW_CTX_ID_MASK, csb_dw) != GEN12_IDLE_CTX_ID) | |
160 | ||
50a9ea08 SS |
161 | #define XEHP_CTX_STATUS_SWITCHED_TO_NEW_QUEUE BIT(1) /* upper csb dword */ |
162 | #define XEHP_CSB_SW_CTX_ID_MASK GENMASK(31, 10) | |
163 | #define XEHP_IDLE_CTX_ID 0xFFFF | |
164 | #define XEHP_CSB_CTX_VALID(csb_dw) \ | |
165 | (FIELD_GET(XEHP_CSB_SW_CTX_ID_MASK, csb_dw) != XEHP_IDLE_CTX_ID) | |
166 | ||
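/*
 * A hedged userspace illustration (not driver code) of what the
 * GEN12_CSB_CTX_VALID()/XEHP_CSB_CTX_VALID() helpers above compute: the
 * software context ID occupies a bitfield of a CSB dword, and an entry is
 * only considered valid when that field is not the reserved "idle" ID.
 * Plain shifts and masks stand in for the kernel's GENMASK()/FIELD_GET().
 */
#include <stdbool.h>
#include <stdint.h>

static inline bool demo_gen12_csb_ctx_valid(uint32_t csb_dw)
{
	uint32_t sw_ctx_id = (csb_dw >> 15) & 0x7ff;	/* bits 25:15 */

	return sw_ctx_id != 0x7ff;			/* GEN12_IDLE_CTX_ID */
}

static inline bool demo_xehp_csb_ctx_valid(uint32_t csb_dw)
{
	uint32_t sw_ctx_id = (csb_dw >> 10) & 0x3fffff;	/* bits 31:10 */

	return sw_ctx_id != 0xffff;			/* XEHP_IDLE_CTX_ID */
}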
0e93cdd4 CW |
167 | /* Typical size of the average request (2 pipecontrols and a MI_BB) */ |
168 | #define EXECLISTS_REQUEST_SIZE 64 /* bytes */ | |
a3aabe86 | 169 | |
6d06779e CW |
170 | struct virtual_engine { |
171 | struct intel_engine_cs base; | |
172 | struct intel_context context; | |
46eecfcc | 173 | struct rcu_work rcu; |
6d06779e CW |
174 | |
175 | /* | |
176 | * We allow only a single request through the virtual engine at a time | |
177 | * (each request in the timeline waits for the completion fence of | |
178 | * the previous before being submitted). By restricting ourselves to | |
179 | * only submitting a single request, each request is placed on to a | |
180 | * physical to maximise load spreading (by virtue of the late greedy | |
181 | * scheduling -- each real engine takes the next available request | |
182 | * upon idling). | |
183 | */ | |
184 | struct i915_request *request; | |
185 | ||
186 | /* | |
187 | * We keep a rbtree of available virtual engines inside each physical | |
188 | * engine, sorted by priority. Here we preallocate the nodes we need | |
189 | * for the virtual engine, indexed by physical_engine->id. | |
190 | */ | |
191 | struct ve_node { | |
192 | struct rb_node rb; | |
193 | int prio; | |
194 | } nodes[I915_NUM_ENGINES]; | |
195 | ||
196 | /* And finally, which physical engines this virtual engine maps onto. */ | |
197 | unsigned int num_siblings; | |
f1e79c7e | 198 | struct intel_engine_cs *siblings[]; |
6d06779e CW |
199 | }; |
200 | ||
201 | static struct virtual_engine *to_virtual_engine(struct intel_engine_cs *engine) | |
202 | { | |
203 | GEM_BUG_ON(!intel_engine_is_virtual(engine)); | |
204 | return container_of(engine, struct virtual_engine, base); | |
205 | } | |
206 | ||
55612025 | 207 | static struct intel_context * |
e5e32171 MB |
208 | execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count, |
209 | unsigned long flags); | |
55612025 | 210 | |
a7f328fc | 211 | static struct i915_request * |
7904e081 CW |
212 | __active_request(const struct intel_timeline * const tl, |
213 | struct i915_request *rq, | |
214 | int error) | |
d12acee8 | 215 | { |
a7f328fc | 216 | struct i915_request *active = rq; |
d12acee8 | 217 | |
7904e081 | 218 | list_for_each_entry_from_reverse(rq, &tl->requests, link) { |
6f0726b4 | 219 | if (__i915_request_is_complete(rq)) |
d12acee8 CW |
220 | break; |
221 | ||
7904e081 CW |
222 | if (error) { |
223 | i915_request_set_error_once(rq, error); | |
224 | __i915_request_skip(rq); | |
225 | } | |
d12acee8 CW |
226 | active = rq; |
227 | } | |
d12acee8 CW |
228 | |
229 | return active; | |
230 | } | |
231 | ||
7904e081 CW |
232 | static struct i915_request * |
233 | active_request(const struct intel_timeline * const tl, struct i915_request *rq) | |
234 | { | |
235 | return __active_request(tl, rq, 0); | |
236 | } | |
237 | ||
e7326336 | 238 | static void ring_set_paused(const struct intel_engine_cs *engine, int state) |
22b7a426 CW |
239 | { |
240 | /* | |
241 | * We inspect HWS_PREEMPT with a semaphore inside | |
242 | * engine->emit_fini_breadcrumb. If the dword is true, | |
243 | * the ring is paused as the semaphore will busywait | |
244 | * until the dword is false. | |
245 | */ | |
246 | engine->status_page.addr[I915_GEM_HWS_PREEMPT] = state; | |
8db7933e CW |
247 | if (state) |
248 | wmb(); | |
22b7a426 CW |
249 | } |
250 | ||
e7326336 | 251 | static struct i915_priolist *to_priolist(struct rb_node *rb) |
f6322edd CW |
252 | { |
253 | return rb_entry(rb, struct i915_priolist, node); | |
254 | } | |
255 | ||
e7326336 | 256 | static int rq_prio(const struct i915_request *rq) |
f6322edd | 257 | { |
a4e648a0 | 258 | return READ_ONCE(rq->sched.attr.priority); |
f6322edd CW |
259 | } |
260 | ||
b5773a36 CW |
261 | static int effective_prio(const struct i915_request *rq) |
262 | { | |
1e3f697e CW |
263 | int prio = rq_prio(rq); |
264 | ||
2a98f4e6 LL |
265 | /* |
266 | * If this request is special and must not be interrupted at any | |
267 | * cost, so be it. Note we are only checking the most recent request | |
268 | * in the context and so may be masking an earlier vip request. It | |
269 | * is hoped that under the conditions where nopreempt is used, this | |
270 | * will not matter (i.e. all requests to that context will be | |
271 | * nopreempt for as long as desired). | |
272 | */ | |
273 | if (i915_request_has_nopreempt(rq)) | |
274 | prio = I915_PRIORITY_UNPREEMPTABLE; | |
275 | ||
eec39e44 | 276 | return prio; |
b5773a36 CW |
277 | } |
278 | ||
3e28d371 | 279 | static int queue_prio(const struct i915_sched_engine *sched_engine) |
c9a64622 | 280 | { |
c9a64622 CW |
281 | struct rb_node *rb; |
282 | ||
3e28d371 | 283 | rb = rb_first_cached(&sched_engine->queue); |
c9a64622 CW |
284 | if (!rb) |
285 | return INT_MIN; | |
286 | ||
2867ff6c | 287 | return to_priolist(rb)->priority; |
c9a64622 CW |
288 | } |
289 | ||
64b7a3fa CW |
290 | static int virtual_prio(const struct intel_engine_execlists *el) |
291 | { | |
292 | struct rb_node *rb = rb_first_cached(&el->virtual); | |
293 | ||
294 | return rb ? rb_entry(rb, struct ve_node, rb)->prio : INT_MIN; | |
295 | } | |
296 | ||
e7326336 CW |
297 | static bool need_preempt(const struct intel_engine_cs *engine, |
298 | const struct i915_request *rq) | |
c9a64622 | 299 | { |
b5773a36 | 300 | int last_prio; |
c9a64622 | 301 | |
09975b86 CW |
302 | if (!intel_engine_has_semaphores(engine)) |
303 | return false; | |
304 | ||
c9a64622 CW |
305 | /* |
306 | * Check if the current priority hint merits a preemption attempt. | |
307 | * | |
308 | * We record the highest value priority we saw during rescheduling | |
309 | * prior to this dequeue, therefore we know that if it is strictly | |
310 | * less than the current tail of ELSP[0], we do not need to force | |
311 | * a preempt-to-idle cycle. | |
312 | * | |
313 | * However, the priority hint is a mere hint that we may need to | |
314 | * preempt. If that hint is stale or we may be trying to preempt | |
315 | * ourselves, ignore the request. | |
253a774b CW |
316 | * |
317 | * More naturally we would write | |
318 | * prio >= max(0, last); | |
319 | * except that we wish to prevent triggering preemption at the same | |
320 | * priority level: the task that is running should remain running | |
321 | * to preserve FIFO ordering of dependencies. | |
c9a64622 | 322 | */ |
253a774b | 323 | last_prio = max(effective_prio(rq), I915_PRIORITY_NORMAL - 1); |
3e28d371 | 324 | if (engine->sched_engine->queue_priority_hint <= last_prio) |
c9a64622 CW |
325 | return false; |
326 | ||
327 | /* | |
328 | * Check against the first request in ELSP[1], it will, thanks to the | |
329 | * power of PI, be the highest priority of that context. | |
330 | */ | |
349a2bc5 | 331 | if (!list_is_last(&rq->sched.link, &engine->sched_engine->requests) && |
422d7df4 | 332 | rq_prio(list_next_entry(rq, sched.link)) > last_prio) |
c9a64622 CW |
333 | return true; |
334 | ||
335 | /* | |
336 | * If the inflight context did not trigger the preemption, then maybe | |
337 | * it was the set of queued requests? Pick the highest priority in | |
338 | * the queue (the first active priolist) and see if it deserves to be | |
339 | * running instead of ELSP[0]. | |
340 | * | |
341 | * The highest priority request in the queue can not be either | |
342 | * ELSP[0] or ELSP[1] as, thanks again to PI, if it was the same | |
343 | * context, its priority would not exceed ELSP[0] aka last_prio. | |
344 | */ | |
64b7a3fa | 345 | return max(virtual_prio(&engine->execlists), |
3e28d371 | 346 | queue_prio(engine->sched_engine)) > last_prio; |
c9a64622 CW |
347 | } |
348 | ||
e7326336 | 349 | __maybe_unused static bool |
c10c78ad | 350 | assert_priority_queue(const struct i915_request *prev, |
c9a64622 | 351 | const struct i915_request *next) |
f6322edd | 352 | { |
c9a64622 CW |
353 | /* |
354 | * Without preemption, the prev may refer to the still active element | |
355 | * which we refuse to let go. | |
356 | * | |
357 | * Even with preemption, there are times when we think it is better not | |
358 | * to preempt and leave an ostensibly lower priority request in flight. | |
359 | */ | |
22b7a426 | 360 | if (i915_request_is_active(prev)) |
c9a64622 CW |
361 | return true; |
362 | ||
363 | return rq_prio(prev) >= rq_prio(next); | |
f6322edd CW |
364 | } |
365 | ||
eb8d0f5a | 366 | static struct i915_request * |
4cc79cbb | 367 | __unwind_incomplete_requests(struct intel_engine_cs *engine) |
7e4992ac | 368 | { |
b16c7651 | 369 | struct i915_request *rq, *rn, *active = NULL; |
3f649ab7 | 370 | struct list_head *pl; |
4cc79cbb | 371 | int prio = I915_PRIORITY_INVALID; |
7e4992ac | 372 | |
349a2bc5 | 373 | lockdep_assert_held(&engine->sched_engine->lock); |
7e4992ac CW |
374 | |
375 | list_for_each_entry_safe_reverse(rq, rn, | |
349a2bc5 | 376 | &engine->sched_engine->requests, |
422d7df4 | 377 | sched.link) { |
6f0726b4 | 378 | if (__i915_request_is_complete(rq)) { |
b8e2bd98 CW |
379 | list_del_init(&rq->sched.link); |
380 | continue; | |
381 | } | |
7e4992ac | 382 | |
e61e0f51 | 383 | __i915_request_unsubmit(rq); |
7e4992ac | 384 | |
f81475bb CW |
385 | GEM_BUG_ON(rq_prio(rq) == I915_PRIORITY_INVALID); |
386 | if (rq_prio(rq) != prio) { | |
387 | prio = rq_prio(rq); | |
d2a31d02 MB |
388 | pl = i915_sched_lookup_priolist(engine->sched_engine, |
389 | prio); | |
f81475bb | 390 | } |
074bb195 | 391 | GEM_BUG_ON(i915_sched_engine_is_empty(engine->sched_engine)); |
b16c7651 | 392 | |
f81475bb CW |
393 | list_move(&rq->sched.link, pl); |
394 | set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); | |
672c368f | 395 | |
f81475bb CW |
396 | /* Check in case we rollback so far we wrap [size/2] */ |
397 | if (intel_ring_direction(rq->ring, | |
398 | rq->tail, | |
399 | rq->ring->tail + 8) > 0) | |
400 | rq->context->lrc.desc |= CTX_DESC_FORCE_RESTORE; | |
8ab3a381 | 401 | |
f81475bb | 402 | active = rq; |
b16c7651 CW |
403 | } |
404 | ||
eb8d0f5a | 405 | return active; |
7e4992ac CW |
406 | } |
407 | ||
292ad25c | 408 | struct i915_request * |
a4598d17 MW |
409 | execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists) |
410 | { | |
411 | struct intel_engine_cs *engine = | |
412 | container_of(execlists, typeof(*engine), execlists); | |
413 | ||
4cc79cbb | 414 | return __unwind_incomplete_requests(engine); |
a4598d17 MW |
415 | } |
416 | ||
e7326336 | 417 | static void |
e61e0f51 | 418 | execlists_context_status_change(struct i915_request *rq, unsigned long status) |
84b790f8 | 419 | { |
bbd6c47e CW |
420 | /* |
421 | * Only used when GVT-g is enabled now. When GVT-g is disabled, | |
422 | * the compiler should eliminate this function as dead-code. | |
423 | */ | |
424 | if (!IS_ENABLED(CONFIG_DRM_I915_GVT)) | |
425 | return; | |
6daccb0b | 426 | |
3fc03069 CD |
427 | atomic_notifier_call_chain(&rq->engine->context_status_notifier, |
428 | status, rq); | |
84b790f8 BW |
429 | } |
430 | ||
31b61f0e CW |
431 | static void reset_active(struct i915_request *rq, |
432 | struct intel_engine_cs *engine) | |
433 | { | |
9f3ccd40 | 434 | struct intel_context * const ce = rq->context; |
31b61f0e CW |
435 | u32 head; |
436 | ||
437 | /* | |
438 | * The executing context has been cancelled. We want to prevent | |
439 | * further execution along this context and propagate the error on | |
440 | * to anything depending on its results. | |
441 | * | |
442 | * In __i915_request_submit(), we apply the -EIO and remove the | |
443 | * requests' payloads for any banned requests. But first, we must | |
444 | * rewind the context back to the start of the incomplete request so | |
445 | * that we do not jump back into the middle of the batch. | |
446 | * | |
447 | * We preserve the breadcrumbs and semaphores of the incomplete | |
448 | * requests so that inter-timeline dependencies (i.e other timelines) | |
449 | * remain correctly ordered. And we defer to __i915_request_submit() | |
450 | * so that all asynchronous waits are correctly handled. | |
451 | */ | |
7904e081 | 452 | ENGINE_TRACE(engine, "{ reset rq=%llx:%lld }\n", |
639f2f24 | 453 | rq->fence.context, rq->fence.seqno); |
31b61f0e CW |
454 | |
455 | /* On resubmission of the active request, payload will be scrubbed */ | |
6f0726b4 | 456 | if (__i915_request_is_complete(rq)) |
31b61f0e CW |
457 | head = rq->tail; |
458 | else | |
7904e081 | 459 | head = __active_request(ce->timeline, rq, -EIO)->head; |
42827350 | 460 | head = intel_ring_wrap(ce->ring, head); |
31b61f0e CW |
461 | |
462 | /* Scrub the context image to prevent replaying the previous batch */ | |
a0d3fdb6 | 463 | lrc_init_regs(ce, engine, true); |
31b61f0e CW |
464 | |
465 | /* We've switched away, so this should be a no-op, but intent matters */ | |
a0d3fdb6 | 466 | ce->lrc.lrca = lrc_update_regs(ce, engine, head); |
1883a0a4 TU |
467 | } |
468 | ||
38b237ea CW |
469 | static bool bad_request(const struct i915_request *rq) |
470 | { | |
471 | return rq->fence.error && i915_request_started(rq); | |
472 | } | |
473 | ||
e7326336 | 474 | static struct intel_engine_cs * |
df403069 CW |
475 | __execlists_schedule_in(struct i915_request *rq) |
476 | { | |
477 | struct intel_engine_cs * const engine = rq->engine; | |
9f3ccd40 | 478 | struct intel_context * const ce = rq->context; |
df403069 CW |
479 | |
480 | intel_context_get(ce); | |
481 | ||
9c080b0f CW |
482 | if (unlikely(intel_context_is_closed(ce) && |
483 | !intel_engine_has_heartbeat(engine))) | |
45c64ecf | 484 | intel_context_set_exiting(ce); |
9c080b0f | 485 | |
45c64ecf | 486 | if (unlikely(!intel_context_is_schedulable(ce) || bad_request(rq))) |
31b61f0e CW |
487 | reset_active(rq, engine); |
488 | ||
b0b10248 | 489 | if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) |
a0d3fdb6 | 490 | lrc_check_regs(ce, engine, "before"); |
b0b10248 | 491 | |
2935ed53 CW |
492 | if (ce->tag) { |
493 | /* Use a fixed tag for OA and friends */ | |
5c4a53e3 | 494 | GEM_BUG_ON(ce->tag <= BITS_PER_LONG); |
2632f174 | 495 | ce->lrc.ccid = ce->tag; |
50a9ea08 SS |
496 | } else if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) { |
497 | /* We don't need a strict matching tag, just different values */ | |
498 | unsigned int tag = ffs(READ_ONCE(engine->context_tag)); | |
499 | ||
500 | GEM_BUG_ON(tag == 0 || tag >= BITS_PER_LONG); | |
501 | clear_bit(tag - 1, &engine->context_tag); | |
502 | ce->lrc.ccid = tag << (XEHP_SW_CTX_ID_SHIFT - 32); | |
503 | ||
504 | BUILD_BUG_ON(BITS_PER_LONG > GEN12_MAX_CONTEXT_HW_ID); | |
505 | ||
2935ed53 CW |
506 | } else { |
507 | /* We don't need a strict matching tag, just different values */ | |
177b7a52 | 508 | unsigned int tag = __ffs(engine->context_tag); |
5c4a53e3 | 509 | |
177b7a52 CW |
510 | GEM_BUG_ON(tag >= BITS_PER_LONG); |
511 | __clear_bit(tag, &engine->context_tag); | |
512 | ce->lrc.ccid = (1 + tag) << (GEN11_SW_CTX_ID_SHIFT - 32); | |
5c4a53e3 CW |
513 | |
514 | BUILD_BUG_ON(BITS_PER_LONG > GEN12_MAX_CONTEXT_HW_ID); | |
2935ed53 CW |
515 | } |
516 | ||
2632f174 CW |
517 | ce->lrc.ccid |= engine->execlists.ccid; |
518 | ||
93b0e8fe | 519 | __intel_gt_pm_get(engine->gt); |
2c421896 | 520 | if (engine->fw_domain && !engine->fw_active++) |
ac533c56 | 521 | intel_uncore_forcewake_get(engine->uncore, engine->fw_domain); |
df403069 CW |
522 | execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN); |
523 | intel_engine_context_in(engine); | |
524 | ||
177b7a52 CW |
525 | CE_TRACE(ce, "schedule-in, ccid:%x\n", ce->lrc.ccid); |
526 | ||
df403069 CW |
527 | return engine; |
528 | } | |
529 | ||
e7326336 | 530 | static void execlists_schedule_in(struct i915_request *rq, int idx) |
f2605207 | 531 | { |
9f3ccd40 | 532 | struct intel_context * const ce = rq->context; |
df403069 | 533 | struct intel_engine_cs *old; |
f2605207 | 534 | |
df403069 | 535 | GEM_BUG_ON(!intel_engine_pm_is_awake(rq->engine)); |
22b7a426 | 536 | trace_i915_request_in(rq, idx); |
f2605207 | 537 | |
177b7a52 CW |
538 | old = ce->inflight; |
539 | if (!old) | |
540 | old = __execlists_schedule_in(rq); | |
541 | WRITE_ONCE(ce->inflight, ptr_inc(old)); | |
22b7a426 | 542 | |
22b7a426 | 543 | GEM_BUG_ON(intel_context_inflight(ce) != rq->engine); |
73fd9d38 TU |
544 | } |
545 | ||
f81475bb CW |
546 | static void |
547 | resubmit_virtual_request(struct i915_request *rq, struct virtual_engine *ve) | |
78e41ddd | 548 | { |
bab0557c CW |
549 | struct intel_engine_cs *engine = rq->engine; |
550 | ||
349a2bc5 | 551 | spin_lock_irq(&engine->sched_engine->lock); |
f81475bb CW |
552 | |
553 | clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); | |
554 | WRITE_ONCE(rq->engine, &ve->base); | |
555 | ve->base.submit_request(rq); | |
556 | ||
349a2bc5 | 557 | spin_unlock_irq(&engine->sched_engine->lock); |
f81475bb CW |
558 | } |
559 | ||
560 | static void kick_siblings(struct i915_request *rq, struct intel_context *ce) | |
561 | { | |
562 | struct virtual_engine *ve = container_of(ce, typeof(*ve), context); | |
563 | struct intel_engine_cs *engine = rq->engine; | |
564 | ||
2b2985a4 CW |
565 | /* |
566 | * After this point, the rq may be transferred to a new sibling, so | |
567 | * before we clear ce->inflight make sure that the context has been | |
568 | * removed from the b->signalers and furthermore we need to make sure | |
569 | * that the concurrent iterator in signal_irq_work is no longer | |
570 | * following ce->signal_link. | |
571 | */ | |
572 | if (!list_empty(&ce->signals)) | |
573 | intel_context_remove_breadcrumbs(ce, engine->breadcrumbs); | |
574 | ||
f81475bb CW |
575 | /* |
576 | * This engine is now too busy to run this virtual request, so | |
577 | * see if we can find an alternative engine for it to execute on. | |
578 | * Once a request has become bonded to this engine, we treat it the | |
579 | * same as any other native request. | |
580 | */ | |
581 | if (i915_request_in_priority_queue(rq) && | |
582 | rq->execution_mask != engine->mask) | |
583 | resubmit_virtual_request(rq, ve); | |
584 | ||
2efa2c52 | 585 | if (READ_ONCE(ve->request)) |
22916bad | 586 | tasklet_hi_schedule(&ve->base.sched_engine->tasklet); |
78e41ddd CW |
587 | } |
588 | ||
e7326336 CW |
589 | static void __execlists_schedule_out(struct i915_request * const rq, |
590 | struct intel_context * const ce) | |
73fd9d38 | 591 | { |
177b7a52 CW |
592 | struct intel_engine_cs * const engine = rq->engine; |
593 | unsigned int ccid; | |
22b7a426 | 594 | |
31b61f0e | 595 | /* |
349a2bc5 | 596 | * NB process_csb() is not under the engine->sched_engine->lock and hence |
31b61f0e CW |
597 | * schedule_out can race with schedule_in meaning that we should |
598 | * refrain from doing non-trivial work here. | |
599 | */ | |
600 | ||
177b7a52 | 601 | CE_TRACE(ce, "schedule-out, ccid:%x\n", ce->lrc.ccid); |
e7326336 | 602 | GEM_BUG_ON(ce->inflight != engine); |
177b7a52 | 603 | |
89db9537 | 604 | if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) |
a0d3fdb6 | 605 | lrc_check_regs(ce, engine, "after"); |
89db9537 | 606 | |
4f88f874 CW |
607 | /* |
608 | * If we have just completed this context, the engine may now be | |
609 | * idle and we want to re-enter powersaving. | |
610 | */ | |
b1ad5f6d | 611 | if (intel_timeline_is_last(ce->timeline, rq) && |
6f0726b4 | 612 | __i915_request_is_complete(rq)) |
4f88f874 CW |
613 | intel_engine_add_retire(engine, ce->timeline); |
614 | ||
177b7a52 | 615 | ccid = ce->lrc.ccid; |
50a9ea08 SS |
616 | if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) { |
617 | ccid >>= XEHP_SW_CTX_ID_SHIFT - 32; | |
618 | ccid &= XEHP_MAX_CONTEXT_HW_ID; | |
619 | } else { | |
620 | ccid >>= GEN11_SW_CTX_ID_SHIFT - 32; | |
621 | ccid &= GEN12_MAX_CONTEXT_HW_ID; | |
622 | } | |
623 | ||
5c4a53e3 CW |
624 | if (ccid < BITS_PER_LONG) { |
625 | GEM_BUG_ON(ccid == 0); | |
626 | GEM_BUG_ON(test_bit(ccid - 1, &engine->context_tag)); | |
177b7a52 | 627 | __set_bit(ccid - 1, &engine->context_tag); |
5c4a53e3 | 628 | } |
df403069 CW |
629 | intel_engine_context_out(engine); |
630 | execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT); | |
2c421896 | 631 | if (engine->fw_domain && !--engine->fw_active) |
ac533c56 | 632 | intel_uncore_forcewake_put(engine->uncore, engine->fw_domain); |
07779a76 | 633 | intel_gt_pm_put_async(engine->gt); |
22b7a426 | 634 | |
df403069 CW |
635 | /* |
636 | * If this is part of a virtual engine, its next request may | |
637 | * have been blocked waiting for access to the active context. | |
638 | * We have to kick all the siblings again in case we need to | |
639 | * switch (e.g. the next request is not runnable on this | |
640 | * engine). Hopefully, we will already have submitted the next | |
641 | * request before the tasklet runs and do not need to rebuild | |
642 | * each virtual tree and kick everyone again. | |
643 | */ | |
644 | if (ce->engine != engine) | |
645 | kick_siblings(rq, ce); | |
e7326336 CW |
646 | |
647 | WRITE_ONCE(ce->inflight, NULL); | |
648 | intel_context_put(ce); | |
df403069 | 649 | } |
22b7a426 | 650 | |
e7326336 | 651 | static inline void execlists_schedule_out(struct i915_request *rq) |
df403069 | 652 | { |
9f3ccd40 | 653 | struct intel_context * const ce = rq->context; |
12fdaf19 | 654 | |
df403069 | 655 | trace_i915_request_out(rq); |
df403069 | 656 | |
177b7a52 CW |
657 | GEM_BUG_ON(!ce->inflight); |
658 | ce->inflight = ptr_dec(ce->inflight); | |
e7326336 CW |
659 | if (!__intel_context_inflight_count(ce->inflight)) |
660 | __execlists_schedule_out(rq, ce); | |
22b7a426 CW |
661 | |
662 | i915_request_put(rq); | |
73fd9d38 TU |
663 | } |
664 | ||
a5c89f7c MB |
665 | static u32 map_i915_prio_to_lrc_desc_prio(int prio) |
666 | { | |
667 | if (prio > I915_PRIORITY_NORMAL) | |
668 | return GEN12_CTX_PRIORITY_HIGH; | |
669 | else if (prio < I915_PRIORITY_NORMAL) | |
670 | return GEN12_CTX_PRIORITY_LOW; | |
671 | else | |
672 | return GEN12_CTX_PRIORITY_NORMAL; | |
673 | } | |
674 | ||
82c69bf5 | 675 | static u64 execlists_update_context(struct i915_request *rq) |
ae1250b9 | 676 | { |
9f3ccd40 | 677 | struct intel_context *ce = rq->context; |
adfadb56 | 678 | u64 desc; |
5ba32c7b | 679 | u32 tail, prev; |
ae1250b9 | 680 | |
adfadb56 MR |
681 | desc = ce->lrc.desc; |
682 | if (rq->engine->flags & I915_ENGINE_HAS_EU_PRIORITY) | |
a5c89f7c | 683 | desc |= map_i915_prio_to_lrc_desc_prio(rq_prio(rq)); |
adfadb56 | 684 | |
82c69bf5 CW |
685 | /* |
686 | * WaIdleLiteRestore:bdw,skl | |
687 | * | |
688 | * We should never submit the context with the same RING_TAIL twice | |
689 | * just in case we submit an empty ring, which confuses the HW. | |
690 | * | |
691 | * We append a couple of NOOPs (gen8_emit_wa_tail) after the end of | |
692 | * the normal request to be able to always advance the RING_TAIL on | |
693 | * subsequent resubmissions (for lite restore). Should that fail us, | |
694 | * and we try and submit the same tail again, force the context | |
695 | * reload. | |
5ba32c7b CW |
696 | * |
697 | * If we need to return to a preempted context, we need to skip the | |
698 | * lite-restore and force it to reload the RING_TAIL. Otherwise, the | |
699 | * HW has a tendency to ignore us rewinding the TAIL to the end of | |
700 | * an earlier request. | |
82c69bf5 | 701 | */ |
8ab3a381 CW |
702 | GEM_BUG_ON(ce->lrc_reg_state[CTX_RING_TAIL] != rq->ring->tail); |
703 | prev = rq->ring->tail; | |
82c69bf5 | 704 | tail = intel_ring_set_tail(rq->ring, rq->tail); |
5ba32c7b | 705 | if (unlikely(intel_ring_direction(rq->ring, tail, prev) <= 0)) |
82c69bf5 CW |
706 | desc |= CTX_DESC_FORCE_RESTORE; |
707 | ce->lrc_reg_state[CTX_RING_TAIL] = tail; | |
708 | rq->tail = rq->wa_tail; | |
70c2a24d | 709 | |
987abd5c CW |
710 | /* |
711 | * Make sure the context image is complete before we submit it to HW. | |
712 | * | |
713 | * Ostensibly, writes (including the WCB) should be flushed prior to | |
714 | * an uncached write such as our mmio register access, the empirical | |
715 | * evidence (esp. on Braswell) suggests that the WC write into memory | |
716 | * may not be visible to the HW prior to the completion of the UC | |
717 | * register write and that we may begin execution from the context | |
718 | * before its image is complete leading to invalid PD chasing. | |
719 | */ | |
69a48c1d | 720 | wmb(); |
22b7a426 | 721 | |
2632f174 | 722 | ce->lrc.desc &= ~CTX_DESC_FORCE_RESTORE; |
22b7a426 | 723 | return desc; |
ae1250b9 OM |
724 | } |
725 | ||
e7326336 | 726 | static void write_desc(struct intel_engine_execlists *execlists, u64 desc, u32 port) |
beecec90 | 727 | { |
05f0addd TD |
728 | if (execlists->ctrl_reg) { |
729 | writel(lower_32_bits(desc), execlists->submit_reg + port * 2); | |
730 | writel(upper_32_bits(desc), execlists->submit_reg + port * 2 + 1); | |
731 | } else { | |
732 | writel(upper_32_bits(desc), execlists->submit_reg); | |
733 | writel(lower_32_bits(desc), execlists->submit_reg); | |
734 | } | |
beecec90 CW |
735 | } |
736 | ||
8b6d457f CW |
737 | static __maybe_unused char * |
738 | dump_port(char *buf, int buflen, const char *prefix, struct i915_request *rq) | |
739 | { | |
740 | if (!rq) | |
741 | return ""; | |
742 | ||
2632f174 | 743 | snprintf(buf, buflen, "%sccid:%x %llx:%lld%s prio %d", |
8b6d457f | 744 | prefix, |
2632f174 | 745 | rq->context->lrc.ccid, |
8b6d457f | 746 | rq->fence.context, rq->fence.seqno, |
6f0726b4 CW |
747 | __i915_request_is_complete(rq) ? "!" : |
748 | __i915_request_has_started(rq) ? "*" : | |
8b6d457f CW |
749 | "", |
750 | rq_prio(rq)); | |
751 | ||
752 | return buf; | |
753 | } | |
754 | ||
e7326336 | 755 | static __maybe_unused noinline void |
22b7a426 CW |
756 | trace_ports(const struct intel_engine_execlists *execlists, |
757 | const char *msg, | |
758 | struct i915_request * const *ports) | |
759 | { | |
760 | const struct intel_engine_cs *engine = | |
761 | container_of(execlists, typeof(*engine), execlists); | |
8b6d457f | 762 | char __maybe_unused p0[40], p1[40]; |
22b7a426 | 763 | |
198d2533 CW |
764 | if (!ports[0]) |
765 | return; | |
766 | ||
8b6d457f CW |
767 | ENGINE_TRACE(engine, "%s { %s%s }\n", msg, |
768 | dump_port(p0, sizeof(p0), "", ports[0]), | |
769 | dump_port(p1, sizeof(p1), ", ", ports[1])); | |
22b7a426 CW |
770 | } |
771 | ||
e7326336 | 772 | static bool |
22916bad | 773 | reset_in_progress(const struct intel_engine_cs *engine) |
f1042cc8 | 774 | { |
22916bad | 775 | return unlikely(!__tasklet_is_enabled(&engine->sched_engine->tasklet)); |
f1042cc8 CW |
776 | } |
777 | ||
e7326336 | 778 | static __maybe_unused noinline bool |
22b7a426 CW |
779 | assert_pending_valid(const struct intel_engine_execlists *execlists, |
780 | const char *msg) | |
781 | { | |
f6a7c21c CW |
782 | struct intel_engine_cs *engine = |
783 | container_of(execlists, typeof(*engine), execlists); | |
90a79a91 | 784 | struct i915_request * const *port, *rq, *prev = NULL; |
22b7a426 | 785 | struct intel_context *ce = NULL; |
f6a7c21c | 786 | u32 ccid = -1; |
22b7a426 CW |
787 | |
788 | trace_ports(execlists, msg, execlists->pending); | |
789 | ||
f1042cc8 | 790 | /* We may be messing around with the lists during reset, lalala */ |
22916bad | 791 | if (reset_in_progress(engine)) |
f1042cc8 CW |
792 | return true; |
793 | ||
c97fb526 | 794 | if (!execlists->pending[0]) { |
f6a7c21c CW |
795 | GEM_TRACE_ERR("%s: Nothing pending for promotion!\n", |
796 | engine->name); | |
df403069 | 797 | return false; |
c97fb526 | 798 | } |
df403069 | 799 | |
c97fb526 | 800 | if (execlists->pending[execlists_num_ports(execlists)]) { |
f6a7c21c CW |
801 | GEM_TRACE_ERR("%s: Excess pending[%d] for promotion!\n", |
802 | engine->name, execlists_num_ports(execlists)); | |
22b7a426 | 803 | return false; |
c97fb526 | 804 | } |
22b7a426 CW |
805 | |
806 | for (port = execlists->pending; (rq = *port); port++) { | |
c95d31c3 CW |
807 | unsigned long flags; |
808 | bool ok = true; | |
809 | ||
80aac91b CW |
810 | GEM_BUG_ON(!kref_read(&rq->fence.refcount)); |
811 | GEM_BUG_ON(!i915_request_is_active(rq)); | |
812 | ||
9f3ccd40 | 813 | if (ce == rq->context) { |
f6a7c21c CW |
814 | GEM_TRACE_ERR("%s: Dup context:%llx in pending[%zd]\n", |
815 | engine->name, | |
38098750 | 816 | ce->timeline->fence_context, |
c97fb526 | 817 | port - execlists->pending); |
22b7a426 | 818 | return false; |
c97fb526 | 819 | } |
9f3ccd40 | 820 | ce = rq->context; |
c95d31c3 | 821 | |
f6a7c21c CW |
822 | if (ccid == ce->lrc.ccid) { |
823 | GEM_TRACE_ERR("%s: Dup ccid:%x context:%llx in pending[%zd]\n", | |
824 | engine->name, | |
825 | ccid, ce->timeline->fence_context, | |
826 | port - execlists->pending); | |
827 | return false; | |
828 | } | |
829 | ccid = ce->lrc.ccid; | |
830 | ||
15db5fcc | 831 | /* |
8733a063 TU |
832 | * Sentinels are supposed to be the last request so they flush |
833 | * the current execution off the HW. Check that they are the only | |
834 | * request in the pending submission. | |
90a79a91 TU |
835 | * |
836 | * NB: Due to the async nature of preempt-to-busy and request | |
837 | * cancellation we need to handle the case where request | |
838 | * becomes a sentinel in parallel to CSB processing. | |
15db5fcc | 839 | */ |
90a79a91 TU |
840 | if (prev && i915_request_has_sentinel(prev) && |
841 | !READ_ONCE(prev->fence.error)) { | |
f6a7c21c CW |
842 | GEM_TRACE_ERR("%s: context:%llx after sentinel in pending[%zd]\n", |
843 | engine->name, | |
15db5fcc CW |
844 | ce->timeline->fence_context, |
845 | port - execlists->pending); | |
846 | return false; | |
847 | } | |
90a79a91 | 848 | prev = rq; |
15db5fcc | 849 | |
f81475bb CW |
850 | /* |
851 | * We want virtual requests to only be in the first slot so | |
852 | * that they are never stuck behind a hog and can be immediately | |
853 | * transferred onto the next idle engine. | |
854 | */ | |
855 | if (rq->execution_mask != engine->mask && | |
856 | port != execlists->pending) { | |
857 | GEM_TRACE_ERR("%s: virtual engine:%llx not in prime position[%zd]\n", | |
858 | engine->name, | |
859 | ce->timeline->fence_context, | |
860 | port - execlists->pending); | |
861 | return false; | |
862 | } | |
863 | ||
c95d31c3 | 864 | /* Hold tightly onto the lock to prevent concurrent retires! */ |
49e74c8f CW |
865 | if (!spin_trylock_irqsave(&rq->lock, flags)) |
866 | continue; | |
c95d31c3 | 867 | |
6f0726b4 | 868 | if (__i915_request_is_complete(rq)) |
c95d31c3 | 869 | goto unlock; |
22b7a426 | 870 | |
e6ba7648 CW |
871 | if (i915_active_is_idle(&ce->active) && |
872 | !intel_context_is_barrier(ce)) { | |
f6a7c21c CW |
873 | GEM_TRACE_ERR("%s: Inactive context:%llx in pending[%zd]\n", |
874 | engine->name, | |
38098750 | 875 | ce->timeline->fence_context, |
c97fb526 | 876 | port - execlists->pending); |
c95d31c3 CW |
877 | ok = false; |
878 | goto unlock; | |
c97fb526 CW |
879 | } |
880 | ||
881 | if (!i915_vma_is_pinned(ce->state)) { | |
f6a7c21c CW |
882 | GEM_TRACE_ERR("%s: Unpinned context:%llx in pending[%zd]\n", |
883 | engine->name, | |
38098750 | 884 | ce->timeline->fence_context, |
c97fb526 | 885 | port - execlists->pending); |
c95d31c3 CW |
886 | ok = false; |
887 | goto unlock; | |
c97fb526 | 888 | } |
22b7a426 | 889 | |
c97fb526 | 890 | if (!i915_vma_is_pinned(ce->ring->vma)) { |
f6a7c21c CW |
891 | GEM_TRACE_ERR("%s: Unpinned ring:%llx in pending[%zd]\n", |
892 | engine->name, | |
38098750 | 893 | ce->timeline->fence_context, |
c97fb526 | 894 | port - execlists->pending); |
c95d31c3 CW |
895 | ok = false; |
896 | goto unlock; | |
c97fb526 | 897 | } |
c95d31c3 CW |
898 | |
899 | unlock: | |
900 | spin_unlock_irqrestore(&rq->lock, flags); | |
901 | if (!ok) | |
902 | return false; | |
22b7a426 CW |
903 | } |
904 | ||
905 | return ce; | |
906 | } | |
907 | ||
70c2a24d | 908 | static void execlists_submit_ports(struct intel_engine_cs *engine) |
bbd6c47e | 909 | { |
05f0addd | 910 | struct intel_engine_execlists *execlists = &engine->execlists; |
77f0d0e9 | 911 | unsigned int n; |
bbd6c47e | 912 | |
22b7a426 CW |
913 | GEM_BUG_ON(!assert_pending_valid(execlists, "submit")); |
914 | ||
d78d3343 CW |
915 | /* |
916 | * We can skip acquiring intel_runtime_pm_get() here as it was taken | |
917 | * on our behalf by the request (see i915_gem_mark_busy()) and it will | |
918 | * not be relinquished until the device is idle (see | |
919 | * i915_gem_idle_work_handler()). As a precaution, we make sure | |
920 | * that all ELSP are drained i.e. we have processed the CSB, | |
921 | * before allowing ourselves to idle and calling intel_runtime_pm_put(). | |
922 | */ | |
5f22e5b3 | 923 | GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); |
d78d3343 | 924 | |
05f0addd TD |
925 | /* |
926 | * ELSQ note: the submit queue is not cleared after being submitted | |
927 | * to the HW so we need to make sure we always clean it up. This is | |
928 | * currently ensured by the fact that we always write the same number | |
929 | * of elsq entries, keep this in mind before changing the loop below. | |
930 | */ | |
931 | for (n = execlists_num_ports(execlists); n--; ) { | |
22b7a426 | 932 | struct i915_request *rq = execlists->pending[n]; |
bbd6c47e | 933 | |
22b7a426 CW |
934 | write_desc(execlists, |
935 | rq ? execlists_update_context(rq) : 0, | |
936 | n); | |
77f0d0e9 | 937 | } |
05f0addd TD |
938 | |
939 | /* we need to manually load the submit queue */ | |
940 | if (execlists->ctrl_reg) | |
941 | writel(EL_CTRL_LOAD, execlists->ctrl_reg); | |
bbd6c47e CW |
942 | } |
943 | ||
1fc44d9b | 944 | static bool ctx_single_port_submission(const struct intel_context *ce) |
84b790f8 | 945 | { |
70c2a24d | 946 | return (IS_ENABLED(CONFIG_DRM_I915_GVT) && |
9f3ccd40 | 947 | intel_context_force_single_submission(ce)); |
70c2a24d | 948 | } |
84b790f8 | 949 | |
1fc44d9b CW |
950 | static bool can_merge_ctx(const struct intel_context *prev, |
951 | const struct intel_context *next) | |
70c2a24d CW |
952 | { |
953 | if (prev != next) | |
954 | return false; | |
26720ab9 | 955 | |
70c2a24d CW |
956 | if (ctx_single_port_submission(prev)) |
957 | return false; | |
26720ab9 | 958 | |
70c2a24d | 959 | return true; |
84b790f8 BW |
960 | } |
961 | ||
fa192d90 CW |
962 | static unsigned long i915_request_flags(const struct i915_request *rq) |
963 | { | |
964 | return READ_ONCE(rq->fence.flags); | |
965 | } | |
966 | ||
c10c78ad CW |
967 | static bool can_merge_rq(const struct i915_request *prev, |
968 | const struct i915_request *next) | |
969 | { | |
22b7a426 | 970 | GEM_BUG_ON(prev == next); |
c10c78ad CW |
971 | GEM_BUG_ON(!assert_priority_queue(prev, next)); |
972 | ||
c0bb487d CW |
973 | /* |
974 | * We do not submit known completed requests. Therefore if the next | |
975 | * request is already completed, we can pretend to merge it in | |
976 | * with the previous context (and we will skip updating the ELSP | |
977 | * and tracking). Thus hopefully keeping the ELSP full with active | |
978 | * contexts, despite the best efforts of preempt-to-busy to confuse | |
979 | * us. | |
980 | */ | |
6f0726b4 | 981 | if (__i915_request_is_complete(next)) |
c0bb487d CW |
982 | return true; |
983 | ||
8f922e42 | 984 | if (unlikely((i915_request_flags(prev) | i915_request_flags(next)) & |
72ff2b8d CW |
985 | (BIT(I915_FENCE_FLAG_NOPREEMPT) | |
986 | BIT(I915_FENCE_FLAG_SENTINEL)))) | |
d8ad5f52 CW |
987 | return false; |
988 | ||
9f3ccd40 | 989 | if (!can_merge_ctx(prev->context, next->context)) |
c10c78ad CW |
990 | return false; |
991 | ||
1eaa251b | 992 | GEM_BUG_ON(i915_seqno_passed(prev->fence.seqno, next->fence.seqno)); |
c10c78ad CW |
993 | return true; |
994 | } | |
995 | ||
6d06779e CW |
996 | static bool virtual_matches(const struct virtual_engine *ve, |
997 | const struct i915_request *rq, | |
998 | const struct intel_engine_cs *engine) | |
999 | { | |
754f7a0b | 1000 | const struct intel_engine_cs *inflight; |
6d06779e | 1001 | |
0e58de9f CW |
1002 | if (!rq) |
1003 | return false; | |
1004 | ||
78e41ddd CW |
1005 | if (!(rq->execution_mask & engine->mask)) /* We peeked too soon! */ |
1006 | return false; | |
1007 | ||
6d06779e CW |
1008 | /* |
1009 | * We track when the HW has completed saving the context image | |
1010 | * (i.e. when we have seen the final CS event switching out of | |
1011 | * the context) and must not overwrite the context image before | |
1012 | * then. This restricts us to only using the active engine | |
1013 | * while the previous virtualized request is inflight (so | |
1014 | * we reuse the register offsets). This is a very small | |
1015 | * hysteresis on the greedy selection algorithm. | |
1016 | */ | |
22b7a426 | 1017 | inflight = intel_context_inflight(&ve->context); |
754f7a0b | 1018 | if (inflight && inflight != engine) |
6d06779e CW |
1019 | return false; |
1020 | ||
1021 | return true; | |
1022 | } | |
1023 | ||
64b7a3fa CW |
1024 | static struct virtual_engine * |
1025 | first_virtual_engine(struct intel_engine_cs *engine) | |
1026 | { | |
1027 | struct intel_engine_execlists *el = &engine->execlists; | |
1028 | struct rb_node *rb = rb_first_cached(&el->virtual); | |
1029 | ||
1030 | while (rb) { | |
1031 | struct virtual_engine *ve = | |
1032 | rb_entry(rb, typeof(*ve), nodes[engine->id].rb); | |
1033 | struct i915_request *rq = READ_ONCE(ve->request); | |
1034 | ||
1035 | /* lazily cleanup after another engine handled rq */ | |
2efa2c52 | 1036 | if (!rq || !virtual_matches(ve, rq, engine)) { |
64b7a3fa CW |
1037 | rb_erase_cached(rb, &el->virtual); |
1038 | RB_CLEAR_NODE(rb); | |
1039 | rb = rb_first_cached(&el->virtual); | |
1040 | continue; | |
1041 | } | |
1042 | ||
64b7a3fa CW |
1043 | return ve; |
1044 | } | |
1045 | ||
1046 | return NULL; | |
1047 | } | |
1048 | ||
56f581ba CW |
1049 | static void virtual_xfer_context(struct virtual_engine *ve, |
1050 | struct intel_engine_cs *engine) | |
1051 | { | |
1052 | unsigned int n; | |
1053 | ||
1054 | if (likely(engine == ve->siblings[0])) | |
1055 | return; | |
1056 | ||
1057 | GEM_BUG_ON(READ_ONCE(ve->context.inflight)); | |
1058 | if (!intel_engine_has_relative_mmio(engine)) | |
a0d3fdb6 | 1059 | lrc_update_offsets(&ve->context, engine); |
56f581ba CW |
1060 | |
1061 | /* | |
1062 | * Move the bound engine to the top of the list for | |
1063 | * future execution. We then kick this tasklet first | |
1064 | * before checking others, so that we preferentially | |
1065 | * reuse this set of bound registers. | |
1066 | */ | |
1067 | for (n = 1; n < ve->num_siblings; n++) { | |
1068 | if (ve->siblings[n] == engine) { | |
1069 | swap(ve->siblings[n], ve->siblings[0]); | |
1070 | break; | |
1071 | } | |
1072 | } | |
1073 | } | |
1074 | ||
07bfe6bf | 1075 | static void defer_request(struct i915_request *rq, struct list_head * const pl) |
8ee36e04 | 1076 | { |
07bfe6bf | 1077 | LIST_HEAD(list); |
8ee36e04 CW |
1078 | |
1079 | /* | |
1080 | * We want to move the interrupted request to the back of | |
1081 | * the round-robin list (i.e. its priority level), but | |
1082 | * in doing so, we must then move all requests that were in | |
1083 | * flight and were waiting for the interrupted request to | |
1084 | * be run after it again. | |
1085 | */ | |
07bfe6bf CW |
1086 | do { |
1087 | struct i915_dependency *p; | |
8ee36e04 | 1088 | |
07bfe6bf CW |
1089 | GEM_BUG_ON(i915_request_is_active(rq)); |
1090 | list_move_tail(&rq->sched.link, pl); | |
8ee36e04 | 1091 | |
f14f27b1 | 1092 | for_each_waiter(p, rq) { |
07bfe6bf CW |
1093 | struct i915_request *w = |
1094 | container_of(p->waiter, typeof(*w), sched); | |
8ee36e04 | 1095 | |
6b6cd2eb CW |
1096 | if (p->flags & I915_DEPENDENCY_WEAK) |
1097 | continue; | |
1098 | ||
07bfe6bf CW |
1099 | /* Leave semaphores spinning on the other engines */ |
1100 | if (w->engine != rq->engine) | |
1101 | continue; | |
8ee36e04 | 1102 | |
07bfe6bf | 1103 | /* No waiter should start before its signaler */ |
795d4d7f | 1104 | GEM_BUG_ON(i915_request_has_initial_breadcrumb(w) && |
6f0726b4 CW |
1105 | __i915_request_has_started(w) && |
1106 | !__i915_request_is_complete(rq)); | |
8ee36e04 | 1107 | |
32ff621f CW |
1108 | if (!i915_request_is_ready(w)) |
1109 | continue; | |
8ee36e04 | 1110 | |
07bfe6bf CW |
1111 | if (rq_prio(w) < rq_prio(rq)) |
1112 | continue; | |
1113 | ||
1114 | GEM_BUG_ON(rq_prio(w) > rq_prio(rq)); | |
b3f0c15a | 1115 | GEM_BUG_ON(i915_request_is_active(w)); |
07bfe6bf CW |
1116 | list_move_tail(&w->sched.link, &list); |
1117 | } | |
1118 | ||
1119 | rq = list_first_entry_or_null(&list, typeof(*rq), sched.link); | |
1120 | } while (rq); | |
8ee36e04 CW |
1121 | } |
1122 | ||
1123 | static void defer_active(struct intel_engine_cs *engine) | |
1124 | { | |
1125 | struct i915_request *rq; | |
1126 | ||
1127 | rq = __unwind_incomplete_requests(engine); | |
1128 | if (!rq) | |
1129 | return; | |
1130 | ||
d2a31d02 MB |
1131 | defer_request(rq, i915_sched_lookup_priolist(engine->sched_engine, |
1132 | rq_prio(rq))); | |
8ee36e04 CW |
1133 | } |
1134 | ||
c4e8ba73 CW |
1135 | static bool |
1136 | timeslice_yield(const struct intel_engine_execlists *el, | |
1137 | const struct i915_request *rq) | |
1138 | { | |
1139 | /* | |
1140 | * Once bitten, forever smitten! | |
1141 | * | |
1142 | * If the active context ever busy-waited on a semaphore, | |
1143 | * it will be treated as a hog until the end of its timeslice (i.e. | |
1144 | * until it is scheduled out and replaced by a new submission, | |
1145 | * possibly even its own lite-restore). The HW only sends an interrupt | |
1147 | * on the first miss, and we do not know if that semaphore has been | |
1147 | * signaled, or even if it is now stuck on another semaphore. Play | |
1148 | * safe, yield if it might be stuck -- it will be given a fresh | |
1149 | * timeslice in the near future. | |
1150 | */ | |
2632f174 | 1151 | return rq->context->lrc.ccid == READ_ONCE(el->yield); |
c4e8ba73 CW |
1152 | } |
1153 | ||
4386b8e5 CW |
1154 | static bool needs_timeslice(const struct intel_engine_cs *engine, |
1155 | const struct i915_request *rq) | |
c4e8ba73 | 1156 | { |
4386b8e5 CW |
1157 | if (!intel_engine_has_timeslices(engine)) |
1158 | return false; | |
df403069 | 1159 | |
4386b8e5 CW |
1160 | /* If not currently active, or about to switch, wait for next event */ |
1161 | if (!rq || __i915_request_is_complete(rq)) | |
1162 | return false; | |
df403069 | 1163 | |
4386b8e5 CW |
1164 | /* We do not need to start the timeslice until after the ACK */ |
1165 | if (READ_ONCE(engine->execlists.pending[0])) | |
1166 | return false; | |
b79029b2 | 1167 | |
4386b8e5 | 1168 | /* If ELSP[1] is occupied, always check to see if worth slicing */ |
349a2bc5 MB |
1169 | if (!list_is_last_rcu(&rq->sched.link, |
1170 | &engine->sched_engine->requests)) { | |
4386b8e5 CW |
1171 | ENGINE_TRACE(engine, "timeslice required for second inflight context\n"); |
1172 | return true; | |
1173 | } | |
df403069 | 1174 | |
4386b8e5 | 1175 | /* Otherwise, ELSP[0] is by itself, but may be waiting in the queue */ |
074bb195 | 1176 | if (!i915_sched_engine_is_empty(engine->sched_engine)) { |
4386b8e5 CW |
1177 | ENGINE_TRACE(engine, "timeslice required for queue\n"); |
1178 | return true; | |
1179 | } | |
8ee36e04 | 1180 | |
4386b8e5 CW |
1181 | if (!RB_EMPTY_ROOT(&engine->execlists.virtual.rb_root)) { |
1182 | ENGINE_TRACE(engine, "timeslice required for virtual\n"); | |
1183 | return true; | |
1184 | } | |
b79029b2 | 1185 | |
4386b8e5 | 1186 | return false; |
b79029b2 CW |
1187 | } |
1188 | ||
4386b8e5 CW |
1189 | static bool |
1190 | timeslice_expired(struct intel_engine_cs *engine, const struct i915_request *rq) | |
b79029b2 | 1191 | { |
4386b8e5 | 1192 | const struct intel_engine_execlists *el = &engine->execlists; |
f53ae29c | 1193 | |
4386b8e5 CW |
1194 | if (i915_request_has_nopreempt(rq) && __i915_request_has_started(rq)) |
1195 | return false; | |
b79029b2 | 1196 | |
4386b8e5 CW |
1197 | if (!needs_timeslice(engine, rq)) |
1198 | return false; | |
f53ae29c | 1199 | |
4386b8e5 | 1200 | return timer_expired(&el->timer) || timeslice_yield(el, rq); |
8ee36e04 CW |
1201 | } |
1202 | ||
4386b8e5 | 1203 | static unsigned long timeslice(const struct intel_engine_cs *engine) |
3df2deed | 1204 | { |
4386b8e5 CW |
1205 | return READ_ONCE(engine->props.timeslice_duration_ms); |
1206 | } | |
3df2deed | 1207 | |
4386b8e5 CW |
1208 | static void start_timeslice(struct intel_engine_cs *engine) |
1209 | { | |
1210 | struct intel_engine_execlists *el = &engine->execlists; | |
1211 | unsigned long duration; | |
3df2deed | 1212 | |
4386b8e5 CW |
1213 | /* Disable the timer if there is nothing to switch to */ |
1214 | duration = 0; | |
1215 | if (needs_timeslice(engine, *el->active)) { | |
1216 | /* Avoid continually prolonging an active timeslice */ | |
1217 | if (timer_active(&el->timer)) { | |
1218 | /* | |
1219 | * If we just submitted a new ELSP after an old | |
1220 | * context, that context may have already consumed | |
1221 | * its timeslice, so recheck. | |
1222 | */ | |
1223 | if (!timer_pending(&el->timer)) | |
22916bad | 1224 | tasklet_hi_schedule(&engine->sched_engine->tasklet); |
4386b8e5 CW |
1225 | return; |
1226 | } | |
3df2deed | 1227 | |
4386b8e5 CW |
1228 | duration = timeslice(engine); |
1229 | } | |
f53ae29c | 1230 | |
4386b8e5 | 1231 | set_timer_ms(&el->timer, duration); |
3df2deed CW |
1232 | } |
1233 | ||
58d1b427 CW |
1234 | static void record_preemption(struct intel_engine_execlists *execlists) |
1235 | { | |
1236 | (void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++); | |
1237 | } | |
1238 | ||
60ef5b7a CW |
1239 | static unsigned long active_preempt_timeout(struct intel_engine_cs *engine, |
1240 | const struct i915_request *rq) | |
3a7a92ab | 1241 | { |
3a7a92ab CW |
1242 | if (!rq) |
1243 | return 0; | |
1244 | ||
6ef7d362 CW |
1245 | /* Only allow ourselves to force reset the currently active context */ |
1246 | engine->execlists.preempt_target = rq; | |
1247 | ||
d12acee8 | 1248 | /* Force a fast reset for terminated contexts (ignoring sysfs!) */ |
38b237ea | 1249 | if (unlikely(intel_context_is_banned(rq->context) || bad_request(rq))) |
45c64ecf | 1250 | return INTEL_CONTEXT_BANNED_PREEMPT_TIMEOUT_MS; |
d12acee8 | 1251 | |
3a7a92ab CW |
1252 | return READ_ONCE(engine->props.preempt_timeout_ms); |
1253 | } | |
1254 | ||
60ef5b7a CW |
1255 | static void set_preempt_timeout(struct intel_engine_cs *engine, |
1256 | const struct i915_request *rq) | |
3a7a92ab CW |
1257 | { |
1258 | if (!intel_engine_has_preempt_reset(engine)) | |
1259 | return; | |
1260 | ||
1261 | set_timer_ms(&engine->execlists.preempt, | |
60ef5b7a | 1262 | active_preempt_timeout(engine, rq)); |
3a7a92ab CW |
1263 | } |
1264 | ||
a2dd2ff5 CW |
1265 | static bool completed(const struct i915_request *rq) |
1266 | { | |
1267 | if (i915_request_has_sentinel(rq)) | |
1268 | return false; | |
1269 | ||
1270 | return __i915_request_is_complete(rq); | |
1271 | } | |
1272 | ||
9512f985 | 1273 | static void execlists_dequeue(struct intel_engine_cs *engine) |
acdd884a | 1274 | { |
7a62cc61 | 1275 | struct intel_engine_execlists * const execlists = &engine->execlists; |
3e28d371 | 1276 | struct i915_sched_engine * const sched_engine = engine->sched_engine; |
22b7a426 CW |
1277 | struct i915_request **port = execlists->pending; |
1278 | struct i915_request ** const last_port = port + execlists->port_mask; | |
a2dd2ff5 | 1279 | struct i915_request *last, * const *active; |
64b7a3fa | 1280 | struct virtual_engine *ve; |
20311bd3 | 1281 | struct rb_node *rb; |
70c2a24d CW |
1282 | bool submit = false; |
1283 | ||
9512f985 CW |
1284 | /* |
1285 | * Hardware submission is through 2 ports. Conceptually each port | |
70c2a24d CW |
1286 | * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is |
1287 | * static for a context, and unique to each, so we only execute | |
1288 | * requests belonging to a single context from each ring. RING_HEAD | |
1289 | * is maintained by the CS in the context image, it marks the place | |
1290 | * where it got up to last time, and through RING_TAIL we tell the CS | |
1291 | * where we want to execute up to this time. | |
1292 | * | |
1293 | * In this list the requests are in order of execution. Consecutive | |
1294 | * requests from the same context are adjacent in the ringbuffer. We | |
1295 | * can combine these requests into a single RING_TAIL update: | |
1296 | * | |
1297 | * RING_HEAD...req1...req2 | |
1298 | * ^- RING_TAIL | |
1299 | * since to execute req2 the CS must first execute req1. | |
1300 | * | |
1301 | * Our goal then is to point each port to the end of a consecutive | |
1302 | * sequence of requests as being the most optimal (fewest wake ups | |
1303 | * and context switches) submission. | |
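*
* For example, given a ready queue of { ctxA:rq1, ctxA:rq2, ctxB:rq3 },
* the ideal submission is ELSP[0] = ctxA with RING_TAIL at the end of
* rq2, and ELSP[1] = ctxB with RING_TAIL at the end of rq3 (ctxA/ctxB
* and rq1-3 being purely illustrative names).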
779949f4 | 1304 | */ |
acdd884a | 1305 | |
349a2bc5 | 1306 | spin_lock(&sched_engine->lock); |
16f2941a | 1307 | |
22b7a426 CW |
1308 | /* |
1309 | * If the queue is higher priority than the last | |
1310 | * request in the currently active context, submit afresh. | |
1311 | * We will resubmit again afterwards in case we need to split | |
1312 | * the active context to interject the preemption request, | |
1313 | * i.e. we will retrigger preemption following the ack in case | |
1314 | * of trouble. | |
16f2941a | 1315 | * |
35f3fd81 | 1316 | */ |
a2dd2ff5 CW |
1317 | active = execlists->active; |
1318 | while ((last = *active) && completed(last)) | |
1319 | active++; | |
35f3fd81 | 1320 | |
16f2941a | 1321 | if (last) { |
a2dd2ff5 | 1322 | if (need_preempt(engine, last)) { |
639f2f24 VSD |
1323 | ENGINE_TRACE(engine, |
1324 | "preempting last=%llx:%lld, prio=%d, hint=%d\n", | |
1325 | last->fence.context, | |
1326 | last->fence.seqno, | |
1327 | last->sched.attr.priority, | |
3e28d371 | 1328 | sched_engine->queue_priority_hint); |
58d1b427 CW |
1329 | record_preemption(execlists); |
1330 | ||
22b7a426 CW |
1331 | /* |
1332 | * Don't let the RING_HEAD advance past the breadcrumb | |
1333 | * as we unwind (and until we resubmit) so that we do | |
1334 | * not accidentally tell it to go backwards. | |
1335 | */ | |
1336 | ring_set_paused(engine, 1); | |
f6322edd | 1337 | |
22b7a426 CW |
1338 | /* |
1339 | * Note that we have not stopped the GPU at this point, | |
1340 | * so we are unwinding the incomplete requests as they | |
1341 | * remain inflight and so by the time we do complete | |
1342 | * the preemption, some of the unwound requests may | |
1343 | * complete! | |
1344 | */ | |
1345 | __unwind_incomplete_requests(engine); | |
f6322edd | 1346 | |
22b7a426 | 1347 | last = NULL; |
4386b8e5 | 1348 | } else if (timeslice_expired(engine, last)) { |
639f2f24 | 1349 | ENGINE_TRACE(engine, |
4386b8e5 | 1350 | "expired:%s last=%llx:%lld, prio=%d, hint=%d, yield?=%s\n", |
01fabda8 | 1351 | str_yes_no(timer_expired(&execlists->timer)), |
4386b8e5 CW |
1352 | last->fence.context, last->fence.seqno, |
1353 | rq_prio(last), | |
3e28d371 | 1354 | sched_engine->queue_priority_hint, |
01fabda8 | 1355 | str_yes_no(timeslice_yield(execlists, last))); |
8ee36e04 | 1356 | |
4386b8e5 CW |
1357 | /* |
1358 | * Consume this timeslice; ensure we start a new one. | |
1359 | * | |
1360 | * The timeslice expired, and we will unwind the | |
1361 | * running contexts and recompute the next ELSP. | |
1362 | * If that submit will be the same pair of contexts | |
1363 | * (due to dependency ordering), we will skip the | |
1364 | * submission. If we don't cancel the timer now, | |
1365 | * we will see that the timer has expired and | |
1366 | * reschedule the tasklet; continually until the | |
8ae66490 | 1367 | * next context switch or other preemption event. |
4386b8e5 CW |
1368 | * |
1369 | * Since we have decided to reschedule based on | |
1370 | * consumption of this timeslice, if we submit the | |
1371 | * same context again, grant it a full timeslice. | |
1372 | */ | |
1373 | cancel_timer(&execlists->timer); | |
8ee36e04 CW |
1374 | ring_set_paused(engine, 1); |
1375 | defer_active(engine); | |
1376 | ||
1377 | /* | |
1378 | * Unlike for preemption, if we rewind and continue | |
1379 | * executing the same context as previously active, | |
1380 | * the order of execution will remain the same and | |
1381 | * the tail will only advance. We do not need to | |
1382 | * force a full context restore, as a lite-restore | |
1383 | * is sufficient to resample the monotonic TAIL. | |
1384 | * | |
1385 | * If we switch to any other context, similarly we | |
1386 | * will not rewind TAIL of current context, and | |
1387 | * normal save/restore will preserve state and allow | |
1388 | * us to later continue executing the same request. | |
1389 | */ | |
1390 | last = NULL; | |
22b7a426 CW |
1391 | } else { |
1392 | /* | |
1393 | * Otherwise if we already have a request pending | |
1394 | * for execution after the current one, we can | |
1395 | * just wait until the next CS event before | |
1396 | * queuing more. In either case we will force a | |
1397 | * lite-restore preemption event, but if we wait | |
1398 | * we hopefully coalesce several updates into a single | |
1399 | * submission. | |
1400 | */ | |
a2dd2ff5 | 1401 | if (active[1]) { |
253a774b CW |
1402 | /* |
1403 | * Even if ELSP[1] is occupied and not worthy | |
1404 | * of timeslices, our queue might be. | |
1405 | */ | |
349a2bc5 | 1406 | spin_unlock(&sched_engine->lock); |
22b7a426 | 1407 | return; |
253a774b | 1408 | } |
22b7a426 | 1409 | } |
beecec90 CW |
1410 | } |
1411 | ||
64b7a3fa CW |
1412 | /* XXX virtual is always taking precedence */ |
1413 | while ((ve = first_virtual_engine(engine))) { | |
6d06779e CW |
1414 | struct i915_request *rq; |
1415 | ||
349a2bc5 | 1416 | spin_lock(&ve->base.sched_engine->lock); |
6d06779e CW |
1417 | |
1418 | rq = ve->request; | |
0e58de9f CW |
1419 | if (unlikely(!virtual_matches(ve, rq, engine))) |
1420 | goto unlock; /* lost the race to a sibling */ | |
6d06779e | 1421 | |
6d06779e | 1422 | GEM_BUG_ON(rq->engine != &ve->base); |
9f3ccd40 | 1423 | GEM_BUG_ON(rq->context != &ve->context); |
6d06779e | 1424 | |
3e28d371 | 1425 | if (unlikely(rq_prio(rq) < queue_prio(sched_engine))) { |
349a2bc5 | 1426 | spin_unlock(&ve->base.sched_engine->lock); |
64b7a3fa CW |
1427 | break; |
1428 | } | |
6d06779e | 1429 | |
64b7a3fa | 1430 | if (last && !can_merge_rq(last, rq)) { |
349a2bc5 MB |
1431 | spin_unlock(&ve->base.sched_engine->lock); |
1432 | spin_unlock(&engine->sched_engine->lock); | |
64b7a3fa CW |
1433 | return; /* leave this for another sibling */ |
1434 | } | |
6d06779e | 1435 | |
64b7a3fa CW |
1436 | ENGINE_TRACE(engine, |
1437 | "virtual rq=%llx:%lld%s, new engine? %s\n", | |
1438 | rq->fence.context, | |
1439 | rq->fence.seqno, | |
6f0726b4 CW |
1440 | __i915_request_is_complete(rq) ? "!" : |
1441 | __i915_request_has_started(rq) ? "*" : | |
64b7a3fa | 1442 | "", |
01fabda8 | 1443 | str_yes_no(engine != ve->siblings[0])); |
6d06779e | 1444 | |
64b7a3fa | 1445 | WRITE_ONCE(ve->request, NULL); |
3e28d371 | 1446 | WRITE_ONCE(ve->base.sched_engine->queue_priority_hint, INT_MIN); |
6d06779e | 1447 | |
64b7a3fa CW |
1448 | rb = &ve->nodes[engine->id].rb; |
1449 | rb_erase_cached(rb, &execlists->virtual); | |
1450 | RB_CLEAR_NODE(rb); | |
c0bb487d | 1451 | |
64b7a3fa CW |
1452 | GEM_BUG_ON(!(rq->execution_mask & engine->mask)); |
1453 | WRITE_ONCE(rq->engine, engine); | |
1454 | ||
1455 | if (__i915_request_submit(rq)) { | |
c0bb487d | 1456 | /* |
64b7a3fa CW |
1457 | * Only after we confirm that we will submit |
1458 | * this request (i.e. it has not already | |
1459 | * completed), do we want to update the context. | |
1460 | * | |
1461 | * This serves two purposes. It avoids | |
1462 | * unnecessary work if we are resubmitting an | |
1463 | * already completed request after timeslicing. | |
1464 | * But more importantly, it prevents us altering | |
1465 | * ve->siblings[] on an idle context, where | |
1466 | * we may be using ve->siblings[] in | |
1467 | * virtual_context_enter / virtual_context_exit. | |
c0bb487d | 1468 | */ |
64b7a3fa CW |
1469 | virtual_xfer_context(ve, engine); |
1470 | GEM_BUG_ON(ve->siblings[0] != engine); | |
1471 | ||
1472 | submit = true; | |
1473 | last = rq; | |
6d06779e CW |
1474 | } |
1475 | ||
64b7a3fa CW |
1476 | i915_request_put(rq); |
1477 | unlock: | |
349a2bc5 | 1478 | spin_unlock(&ve->base.sched_engine->lock); |
64b7a3fa CW |
1479 | |
1480 | /* | |
1481 | * Hmm, we have a bunch of virtual engine requests, | |
1482 | * but the first one was already completed (thanks | |
1483 | * preempt-to-busy!). Keep looking at the virtual engine queue |
1484 | * until we have no more relevant requests (i.e. | |
1485 | * the normal submit queue has higher priority). | |
1486 | */ | |
1487 | if (submit) | |
1488 | break; | |
6d06779e CW |
1489 | } |
1490 | ||
3e28d371 | 1491 | while ((rb = rb_first_cached(&sched_engine->queue))) { |
f6322edd | 1492 | struct i915_priolist *p = to_priolist(rb); |
e61e0f51 | 1493 | struct i915_request *rq, *rn; |
6c067579 | 1494 | |
2867ff6c | 1495 | priolist_for_each_request_consume(rq, rn, p) { |
c0bb487d | 1496 | bool merge = true; |
22b7a426 | 1497 | |
6c067579 CW |
1498 | /* |
1499 | * Can we combine this request with the current port? | |
1500 | * It has to be the same context/ringbuffer and not | |
1501 | * have any exceptions (e.g. GVT saying never to | |
1502 | * combine contexts). | |
1503 | * | |
1504 | * If we can combine the requests, we can execute both | |
1505 | * by updating the RING_TAIL to point to the end of the | |
1506 | * second request, and so we never need to tell the | |
1507 | * hardware about the first. | |
70c2a24d | 1508 | */ |
c10c78ad | 1509 | if (last && !can_merge_rq(last, rq)) { |
6c067579 CW |
1510 | /* |
1511 | * If we are on the second port and cannot | |
1512 | * combine this request with the last, then we | |
1513 | * are done. | |
1514 | */ | |
85f5e1f3 | 1515 | if (port == last_port) |
6c067579 | 1516 | goto done; |
6c067579 | 1517 | |
c10c78ad CW |
1518 | /* |
1519 | * We must not populate both ELSP[] with the | |
1520 | * same LRCA, i.e. we must submit 2 different | |
1521 | * contexts if we submit 2 ELSP. | |
1522 | */ | |
9f3ccd40 | 1523 | if (last->context == rq->context) |
c10c78ad CW |
1524 | goto done; |
1525 | ||
c3eb54aa CW |
1526 | if (i915_request_has_sentinel(last)) |
1527 | goto done; | |
1528 | ||
f81475bb CW |
1529 | /* |
1530 | * We avoid submitting virtual requests into | |
1531 | * the secondary ports so that we can migrate | |
1532 | * the request immediately to another engine | |
1533 | * rather than wait for the primary request. | |
1534 | */ | |
1535 | if (rq->execution_mask != engine->mask) | |
1536 | goto done; | |
1537 | ||
6c067579 CW |
1538 | /* |
1539 | * If GVT overrides us we only ever submit | |
1540 | * port[0], leaving port[1] empty. Note that we | |
1541 | * also have to be careful that we don't queue | |
1542 | * the same context (even though a different | |
1543 | * request) to the second port. | |
1544 | */ | |
9f3ccd40 CW |
1545 | if (ctx_single_port_submission(last->context) || |
1546 | ctx_single_port_submission(rq->context)) | |
6c067579 | 1547 | goto done; |
6c067579 | 1548 | |
c0bb487d | 1549 | merge = false; |
6c067579 | 1550 | } |
70c2a24d | 1551 | |
c0bb487d CW |
1552 | if (__i915_request_submit(rq)) { |
1553 | if (!merge) { | |
16f2941a | 1554 | *port++ = i915_request_get(last); |
c0bb487d CW |
1555 | last = NULL; |
1556 | } | |
1557 | ||
1558 | GEM_BUG_ON(last && | |
9f3ccd40 CW |
1559 | !can_merge_ctx(last->context, |
1560 | rq->context)); | |
1eaa251b CW |
1561 | GEM_BUG_ON(last && |
1562 | i915_seqno_passed(last->fence.seqno, | |
1563 | rq->fence.seqno)); | |
c0bb487d CW |
1564 | |
1565 | submit = true; | |
1566 | last = rq; | |
1567 | } | |
70c2a24d | 1568 | } |
d55ac5bf | 1569 | |
3e28d371 | 1570 | rb_erase_cached(&p->node, &sched_engine->queue); |
32eb6bcf | 1571 | i915_priolist_free(p); |
f6322edd | 1572 | } |
6c067579 | 1573 | done: |
16f2941a CW |
1574 | *port++ = i915_request_get(last); |
1575 | ||
15c83c43 CW |
1576 | /* |
1577 | * Here be a bit of magic! Or sleight-of-hand, whichever you prefer. | |
1578 | * | |
4d97cbe0 | 1579 | * We choose the priority hint such that if we add a request of greater |
15c83c43 CW |
1580 | * priority than this, we kick the submission tasklet to decide on |
1581 | * the right order of submitting the requests to hardware. We must | |
1582 | * also be prepared to reorder requests as they are in-flight on the | |
4d97cbe0 | 1583 | * HW. We derive the priority hint then as the first "hole" in |
15c83c43 CW |
1584 | * the HW submission ports and if there are no available slots, |
1585 | * the priority of the lowest executing request, i.e. last. | |
1586 | * | |
1587 | * When we do receive a higher priority request ready to run from the | |
4d97cbe0 | 1588 | * user, see queue_request(), the priority hint is bumped to that |
15c83c43 CW |
1589 | * request triggering preemption on the next dequeue (or subsequent |
1590 | * interrupt for secondary ports). | |
1591 | */ | |
3e28d371 | 1592 | sched_engine->queue_priority_hint = queue_prio(sched_engine); |
c4fd7d8c | 1593 | i915_sched_engine_reset_on_empty(sched_engine); |
349a2bc5 | 1594 | spin_unlock(&sched_engine->lock); |
15c83c43 | 1595 | |
4386b8e5 CW |
1596 | /* |
1597 | * We can skip poking the HW if we ended up with exactly the same set | |
1598 | * of requests as currently running, e.g. trying to timeslice a pair | |
1599 | * of ordered contexts. | |
1600 | */ | |
1601 | if (submit && | |
a2dd2ff5 | 1602 | memcmp(active, |
4386b8e5 CW |
1603 | execlists->pending, |
1604 | (port - execlists->pending) * sizeof(*port))) { | |
16f2941a CW |
1605 | *port = NULL; |
1606 | while (port-- != execlists->pending) | |
1607 | execlists_schedule_in(*port, port - execlists->pending); | |
1608 | ||
c4e8ba73 | 1609 | WRITE_ONCE(execlists->yield, -1); |
a2dd2ff5 | 1610 | set_preempt_timeout(engine, *active); |
68ace460 | 1611 | execlists_submit_ports(engine); |
8db7933e CW |
1612 | } else { |
1613 | ring_set_paused(engine, 0); | |
16f2941a CW |
1614 | while (port-- != execlists->pending) |
1615 | i915_request_put(*port); | |
1616 | *execlists->pending = NULL; | |
0b02befa | 1617 | } |
4413c474 CW |
1618 | } |
1619 | ||
16f2941a CW |
1620 | static void execlists_dequeue_irq(struct intel_engine_cs *engine) |
1621 | { | |
1622 | local_irq_disable(); /* Suspend interrupts across request submission */ | |
1623 | execlists_dequeue(engine); | |
1624 | local_irq_enable(); /* flush irq_work (e.g. breadcrumb enabling) */ | |
1625 | } | |
1626 | ||
e7326336 | 1627 | static void clear_ports(struct i915_request **ports, int count) |
6f0726b4 CW |
1628 | { |
1629 | memset_p((void **)ports, NULL, count); | |
1630 | } | |
1631 | ||
e7326336 | 1632 | static void |
6f0726b4 CW |
1633 | copy_ports(struct i915_request **dst, struct i915_request **src, int count) |
1634 | { | |
1635 | /* A memcpy_p() would be very useful here! */ | |
1636 | while (count--) | |
1637 | WRITE_ONCE(*dst++, *src++); /* avoid write tearing */ | |
1638 | } | |
1639 | ||
1640 | static struct i915_request ** | |
1641 | cancel_port_requests(struct intel_engine_execlists * const execlists, | |
1642 | struct i915_request **inactive) | |
cf4591d1 | 1643 | { |
da0ef77e | 1644 | struct i915_request * const *port; |
702791f7 | 1645 | |
da0ef77e | 1646 | for (port = execlists->pending; *port; port++) |
6f0726b4 | 1647 | *inactive++ = *port; |
ab17e6ca | 1648 | clear_ports(execlists->pending, ARRAY_SIZE(execlists->pending)); |
7e44fc28 | 1649 | |
331bf905 | 1650 | /* Mark the end of active before we overwrite *active */ |
da0ef77e | 1651 | for (port = xchg(&execlists->active, execlists->pending); *port; port++) |
6f0726b4 | 1652 | *inactive++ = *port; |
ab17e6ca CW |
1653 | clear_ports(execlists->inflight, ARRAY_SIZE(execlists->inflight)); |
1654 | ||
f494960d | 1655 | smp_wmb(); /* complete the seqlock for execlists_active() */ |
ab17e6ca | 1656 | WRITE_ONCE(execlists->active, execlists->inflight); |
f867b66e CW |
1657 | |
1658 | /* Having cancelled all outstanding process_csb(), stop their timers */ | |
1659 | GEM_BUG_ON(execlists->pending[0]); | |
1660 | cancel_timer(&execlists->timer); | |
1661 | cancel_timer(&execlists->preempt); | |
6f0726b4 CW |
1662 | |
1663 | return inactive; | |
cf4591d1 MK |
1664 | } |
1665 | ||
f4785682 DCS |
1666 | /* |
1667 | * Starting with Gen12, the status has a new format: | |
1668 | * | |
1669 | * bit 0: switched to new queue | |
1670 | * bit 1: reserved | |
1671 | * bit 2: semaphore wait mode (poll or signal), only valid when | |
1672 | * switch detail is set to "wait on semaphore" | |
1673 | * bits 3-5: engine class | |
1674 | * bits 6-11: engine instance | |
1675 | * bits 12-14: reserved | |
1676 | * bits 15-25: sw context id of the lrc the GT switched to | |
1677 | * bits 26-31: sw counter of the lrc the GT switched to | |
1678 | * bits 32-35: context switch detail | |
1679 | * - 0: ctx complete | |
1680 | * - 1: wait on sync flip | |
1681 | * - 2: wait on vblank | |
1682 | * - 3: wait on scanline | |
1683 | * - 4: wait on semaphore | |
1684 | * - 5: context preempted (not on SEMAPHORE_WAIT or | |
1685 | * WAIT_FOR_EVENT) | |
1686 | * bit 36: reserved | |
1687 | * bits 37-43: wait detail (for switch detail 1 to 4) | |
1688 | * bits 44-46: reserved | |
1689 | * bits 47-57: sw context id of the lrc the GT switched away from | |
1690 | * bits 58-63: sw counter of the lrc the GT switched away from | |
50a9ea08 SS |
1691 | * |
1692 | * Xe_HP csb shuffles things around compared to TGL: | |
1693 | * | |
1694 | * bits 0-3: context switch detail (same possible values as TGL) | |
1695 | * bits 4-9: engine instance | |
1696 | * bits 10-25: sw context id of the lrc the GT switched to | |
1697 | * bits 26-31: sw counter of the lrc the GT switched to | |
1698 | * bit 32: semaphore wait mode (poll or signal), Only valid when | |
1699 | * switch detail is set to "wait on semaphore" | |
1700 | * bit 33: switched to new queue | |
1701 | * bits 34-41: wait detail (for switch detail 1 to 4) | |
1702 | * bits 42-57: sw context id of the lrc the GT switched away from | |
1703 | * bits 58-63: sw counter of the lrc the GT switched away from | |
f4785682 | 1704 | */ |
50a9ea08 SS |
1705 | static inline bool |
1706 | __gen12_csb_parse(bool ctx_to_valid, bool ctx_away_valid, bool new_queue, | |
1707 | u8 switch_detail) | |
f4785682 | 1708 | { |
f4785682 DCS |
1709 | /* |
1710 | * The context switch detail is not guaranteed to be 5 when a preemption | |
1711 | * occurs, so we can't just check for that. The check below works for | |
1712 | * all the cases we care about, including preemptions of WAIT | |
1713 | * instructions and lite-restore. Preempt-to-idle via the CTRL register | |
1714 | * would require some extra handling, but we don't support that. | |
1715 | */ | |
f9d4eae2 | 1716 | if (!ctx_away_valid || new_queue) { |
50a9ea08 | 1717 | GEM_BUG_ON(!ctx_to_valid); |
198d2533 | 1718 | return true; |
f9d4eae2 | 1719 | } |
f4785682 DCS |
1720 | |
1721 | /* | |
1722 | * switch detail = 5 is covered by the case above and we do not expect a | |
1723 | * context switch on an unsuccessful wait instruction since we always | |
1724 | * use polling mode. | |
1725 | */ | |
50a9ea08 | 1726 | GEM_BUG_ON(switch_detail); |
198d2533 | 1727 | return false; |
f4785682 DCS |
1728 | } |
1729 | ||
50a9ea08 SS |
1730 | static bool xehp_csb_parse(const u64 csb) |
1731 | { | |
1732 | return __gen12_csb_parse(XEHP_CSB_CTX_VALID(lower_32_bits(csb)), /* cxt to */ | |
1733 | XEHP_CSB_CTX_VALID(upper_32_bits(csb)), /* cxt away */ | |
1734 | upper_32_bits(csb) & XEHP_CTX_STATUS_SWITCHED_TO_NEW_QUEUE, | |
1735 | GEN12_CTX_SWITCH_DETAIL(lower_32_bits(csb))); | |
1736 | } | |
1737 | ||
1738 | static bool gen12_csb_parse(const u64 csb) | |
1739 | { | |
1740 | return __gen12_csb_parse(GEN12_CSB_CTX_VALID(lower_32_bits(csb)), /* cxt to */ | |
1741 | GEN12_CSB_CTX_VALID(upper_32_bits(csb)), /* cxt away */ | |
1742 | lower_32_bits(csb) & GEN12_CTX_STATUS_SWITCHED_TO_NEW_QUEUE, | |
1743 | GEN12_CTX_SWITCH_DETAIL(upper_32_bits(csb))); | |
1744 | } | |
1745 | ||
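/*
 * Illustrative sketch only (hypothetical helpers, not the driver's
 * GEN12_CSB_* / GEN12_CTX_SWITCH_DETAIL() macros): extracting the
 * documented Gen12/TGL fields above from a raw 64b CSB entry with plain
 * shifts and masks, e.g. when decoding events in a standalone tool:
 *
 *	static inline bool tgl_csb_switched_to_new_queue(u64 csb)
 *	{
 *		return csb & BIT(0);                    // bit 0
 *	}
 *
 *	static inline u32 tgl_csb_to_sw_ctx_id(u64 csb)
 *	{
 *		return (csb >> 15) & GENMASK(10, 0);    // bits 15-25
 *	}
 *
 *	static inline u32 tgl_csb_switch_detail(u64 csb)
 *	{
 *		return (csb >> 32) & GENMASK(3, 0);     // bits 32-35
 *	}
 */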
e7326336 | 1746 | static bool gen8_csb_parse(const u64 csb) |
884c4074 CW |
1747 | { |
1748 | return csb & (GEN8_CTX_STATUS_IDLE_ACTIVE | GEN8_CTX_STATUS_PREEMPTED); | |
1749 | } | |
1750 | ||
4ff64bcf CW |
1751 | static noinline u64 |
1752 | wa_csb_read(const struct intel_engine_cs *engine, u64 * const csb) | |
8759aa4c | 1753 | { |
884c4074 CW |
1754 | u64 entry; |
1755 | ||
4ff64bcf CW |
1756 | /* |
1757 | * Reading from the HWSP has one particular advantage: we can detect | |
1758 | * a stale entry. Since the write into HWSP is broken, we have no reason | |
1759 | * to trust the HW at all, the mmio entry may equally be unordered, so | |
1760 | * we prefer the path that is self-checking and as a last resort, | |
1761 | * return the mmio value. | |
1762 | * | |
1763 | * tgl,dg1:HSDES#22011327657 | |
1764 | */ | |
884c4074 | 1765 | preempt_disable(); |
4ff64bcf CW |
1766 | if (wait_for_atomic_us((entry = READ_ONCE(*csb)) != -1, 10)) { |
1767 | int idx = csb - engine->execlists.csb_status; | |
1768 | int status; | |
1769 | ||
1770 | status = GEN8_EXECLISTS_STATUS_BUF; | |
1771 | if (idx >= 6) { | |
1772 | status = GEN11_EXECLISTS_STATUS_BUF2; | |
1773 | idx -= 6; | |
1774 | } | |
1775 | status += sizeof(u64) * idx; | |
1776 | ||
1777 | entry = intel_uncore_read64(engine->uncore, | |
1778 | _MMIO(engine->mmio_base + status)); | |
1779 | } | |
884c4074 CW |
1780 | preempt_enable(); |
1781 | ||
1782 | return entry; | |
1783 | } | |
1784 | ||
e7326336 | 1785 | static u64 csb_read(const struct intel_engine_cs *engine, u64 * const csb) |
884c4074 CW |
1786 | { |
1787 | u64 entry = READ_ONCE(*csb); | |
1788 | ||
1789 | /* | |
1790 | * Unfortunately, the GPU does not always serialise its write | |
1791 | * of the CSB entries before its write of the CSB pointer, at least | |
1792 | * from the perspective of the CPU, using what is known as a Global | |
1793 | * Observation Point. We may read a new CSB tail pointer, but then | |
1794 | * read the stale CSB entries, causing us to misinterpret the | |
1795 | * context-switch events, and eventually declare the GPU hung. | |
1796 | * | |
1797 | * icl:HSDES#1806554093 | |
1798 | * tgl:HSDES#22011248461 | |
1799 | */ | |
1800 | if (unlikely(entry == -1)) | |
4ff64bcf | 1801 | entry = wa_csb_read(engine, csb); |
884c4074 CW |
1802 | |
1803 | /* Consume this entry so that we can spot its future reuse. */ | |
1804 | WRITE_ONCE(*csb, -1); | |
1805 | ||
1806 | /* ELSP is an implicit wmb() before the GPU wraps and overwrites csb */ | |
1807 | return entry; | |
8759aa4c CW |
1808 | } |
1809 | ||
4386b8e5 CW |
1810 | static void new_timeslice(struct intel_engine_execlists *el) |
1811 | { | |
1812 | /* By cancelling, we will start afresh in start_timeslice() */ | |
1813 | cancel_timer(&el->timer); | |
1814 | } | |
1815 | ||
6f0726b4 CW |
1816 | static struct i915_request ** |
1817 | process_csb(struct intel_engine_cs *engine, struct i915_request **inactive) | |
e981e7b1 | 1818 | { |
b620e870 | 1819 | struct intel_engine_execlists * const execlists = &engine->execlists; |
884c4074 | 1820 | u64 * const buf = execlists->csb_status; |
7d4c75d9 | 1821 | const u8 num_entries = execlists->csb_size; |
4386b8e5 | 1822 | struct i915_request **prev; |
bc4237ec | 1823 | u8 head, tail; |
c6a2ac71 | 1824 | |
3c00660d CW |
1825 | /* |
1826 | * As we modify our execlists state tracking we require exclusive | |
1827 | * access. Either we are inside the tasklet, or the tasklet is disabled | |
1828 | * and we assume that is only inside the reset paths and so serialised. | |
1829 | */ | |
22916bad MB |
1830 | GEM_BUG_ON(!tasklet_is_locked(&engine->sched_engine->tasklet) && |
1831 | !reset_in_progress(engine)); | |
c9a64622 | 1832 | |
bc4237ec CW |
1833 | /* |
1834 | * Note that csb_write, csb_status may be either in HWSP or mmio. | |
1835 | * When reading from the csb_write mmio register, we have to be | |
1836 | * careful to only use the GEN8_CSB_WRITE_PTR portion, which is | |
1837 | * the low 4bits. As it happens we know the next 4bits are always | |
1838 | * zero and so we can simply mask off the low u8 of the register |
1839 | * and treat it identically to reading from the HWSP (without having | |
1840 | * to use explicit shifting and masking, and probably bifurcating | |
1841 | * the code to handle the legacy mmio read). | |
1842 | */ | |
1843 | head = execlists->csb_head; | |
1844 | tail = READ_ONCE(*execlists->csb_write); | |
bc4237ec | 1845 | if (unlikely(head == tail)) |
6f0726b4 | 1846 | return inactive; |
b2209e62 | 1847 | |
b2295e2e CW |
1848 | /* |
1849 | * We will consume all events from HW, or at least pretend to. | |
1850 | * | |
1851 | * The sequence of events from the HW is deterministic, and derived | |
1852 | * from our writes to the ELSP, with a smidgen of variability for | |
1853 | * the arrival of the asynchronous requests wrt the inflight |
1854 | * execution. If the HW sends an event that does not correspond with | |
1855 | * the one we are expecting, we have to abandon all hope as we lose | |
1856 | * all tracking of what the engine is actually executing. We will | |
1857 | * only detect we are out of sequence with the HW when we get an | |
1858 | * 'impossible' event because we have already drained our own | |
1859 | * preemption/promotion queue. If this occurs, we know that we likely | |
1860 | * lost track of execution earlier and must unwind and restart, the | |
1861 | * simplest way is to stop processing the event queue and force the |
1862 | * engine to reset. | |
1863 | */ | |
1864 | execlists->csb_head = tail; | |
1865 | ENGINE_TRACE(engine, "cs-irq head=%d, tail=%d\n", head, tail); | |
1866 | ||
bc4237ec CW |
1867 | /* |
1868 | * Hopefully paired with a wmb() in HW! | |
1869 | * | |
1870 | * We must complete the read of the write pointer before any reads | |
1871 | * from the CSB, so that we do not see stale values. Without an rmb | |
1872 | * (lfence) the HW may speculatively perform the CSB[] reads *before* | |
1873 | * we perform the READ_ONCE(*csb_write). | |
1874 | */ | |
1875 | rmb(); | |
4386b8e5 CW |
1876 | |
1877 | /* Remember who was last running under the timer */ | |
1878 | prev = inactive; | |
1879 | *prev = NULL; | |
1880 | ||
bc4237ec | 1881 | do { |
198d2533 | 1882 | bool promote; |
884c4074 | 1883 | u64 csb; |
f4785682 | 1884 | |
7d4c75d9 | 1885 | if (++head == num_entries) |
8ea397fa CW |
1886 | head = 0; |
1887 | ||
1888 | /* | |
1889 | * We are flying near dragons again. | |
1890 | * | |
1891 | * We hold a reference to the request in execlist_port[] | |
1892 | * but no more than that. We are operating in softirq | |
1893 | * context and so cannot hold any mutex or sleep. That | |
1894 | * prevents us from stopping the requests we are processing |
1895 | * in port[] from being retired simultaneously (the | |
1896 | * breadcrumb will be complete before we see the | |
1897 | * context-switch). As we only hold the reference to the | |
1898 | * request, any pointer chasing underneath the request | |
1899 | * is subject to a potential use-after-free. Thus we | |
1900 | * store all of the bookkeeping within port[] as | |
1901 | * required, and avoid using unguarded pointers beneath | |
1902 | * request itself. The same applies to the atomic | |
1903 | * status notifier. | |
1904 | */ | |
1905 | ||
4ff64bcf | 1906 | csb = csb_read(engine, buf + head); |
639f2f24 | 1907 | ENGINE_TRACE(engine, "csb[%d]: status=0x%08x:0x%08x\n", |
884c4074 | 1908 | head, upper_32_bits(csb), lower_32_bits(csb)); |
8ea397fa | 1909 | |
50a9ea08 SS |
1910 | if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) |
1911 | promote = xehp_csb_parse(csb); | |
1912 | else if (GRAPHICS_VER(engine->i915) >= 12) | |
884c4074 | 1913 | promote = gen12_csb_parse(csb); |
f4785682 | 1914 | else |
884c4074 | 1915 | promote = gen8_csb_parse(csb); |
198d2533 | 1916 | if (promote) { |
331bf905 CW |
1917 | struct i915_request * const *old = execlists->active; |
1918 | ||
b2295e2e CW |
1919 | if (GEM_WARN_ON(!*execlists->pending)) { |
1920 | execlists->error_interrupt |= ERROR_CSB; | |
1921 | break; | |
1922 | } | |
1923 | ||
b6560007 CW |
1924 | ring_set_paused(engine, 0); |
1925 | ||
331bf905 CW |
1926 | /* Point active to the new ELSP; prevent overwriting */ |
1927 | WRITE_ONCE(execlists->active, execlists->pending); | |
f494960d | 1928 | smp_wmb(); /* notify execlists_active() */ |
331bf905 | 1929 | |
198d2533 | 1930 | /* cancel old inflight, prepare for switch */ |
331bf905 CW |
1931 | trace_ports(execlists, "preempted", old); |
1932 | while (*old) | |
6f0726b4 | 1933 | *inactive++ = *old++; |
8759aa4c | 1934 | |
198d2533 | 1935 | /* switch pending to inflight */ |
e2ccf0d0 | 1936 | GEM_BUG_ON(!assert_pending_valid(execlists, "promote")); |
b4d9145b CW |
1937 | copy_ports(execlists->inflight, |
1938 | execlists->pending, | |
1939 | execlists_num_ports(execlists)); | |
f494960d CW |
1940 | smp_wmb(); /* complete the seqlock */ |
1941 | WRITE_ONCE(execlists->active, execlists->inflight); | |
8ee36e04 | 1942 | |
6ca7217d CW |
1943 | /* XXX Magic delay for tgl */ |
1944 | ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR); | |
1945 | ||
df403069 | 1946 | WRITE_ONCE(execlists->pending[0], NULL); |
198d2533 | 1947 | } else { |
b2295e2e CW |
1948 | if (GEM_WARN_ON(!*execlists->active)) { |
1949 | execlists->error_interrupt |= ERROR_CSB; | |
1950 | break; | |
1951 | } | |
22b7a426 | 1952 | |
198d2533 | 1953 | /* port0 completed, advanced to port1 */ |
8759aa4c | 1954 | trace_ports(execlists, "completed", execlists->active); |
2ffe80aa | 1955 | |
8ea397fa CW |
1956 | /* |
1957 | * We rely on the hardware being strongly | |
1958 | * ordered, that the breadcrumb write is | |
1959 | * coherent (visible from the CPU) before the | |
15501287 CW |
1960 | * user interrupt is processed. One might assume |
1961 | * that the breadcrumb write, being before the |
1962 | * user interrupt and the CS event for the context |
1963 | * switch, would therefore be visible before the CS event |
1964 | * itself... | |
8ea397fa | 1965 | */ |
c616d238 | 1966 | if (GEM_SHOW_DEBUG() && |
6f0726b4 | 1967 | !__i915_request_is_complete(*execlists->active)) { |
e2ccf0d0 | 1968 | struct i915_request *rq = *execlists->active; |
e06b8524 CW |
1969 | const u32 *regs __maybe_unused = |
1970 | rq->context->lrc_reg_state; | |
c616d238 | 1971 | |
15501287 CW |
1972 | ENGINE_TRACE(engine, |
1973 | "context completed before request!\n"); | |
c616d238 CW |
1974 | ENGINE_TRACE(engine, |
1975 | "ring:{start:0x%08x, head:%04x, tail:%04x, ctl:%08x, mode:%08x}\n", | |
1976 | ENGINE_READ(engine, RING_START), | |
1977 | ENGINE_READ(engine, RING_HEAD) & HEAD_ADDR, | |
1978 | ENGINE_READ(engine, RING_TAIL) & TAIL_ADDR, | |
1979 | ENGINE_READ(engine, RING_CTL), | |
1980 | ENGINE_READ(engine, RING_MI_MODE)); | |
1981 | ENGINE_TRACE(engine, | |
1982 | "rq:{start:%08x, head:%04x, tail:%04x, seqno:%llx:%d, hwsp:%d}, ", | |
1983 | i915_ggtt_offset(rq->ring->vma), | |
1984 | rq->head, rq->tail, | |
1985 | rq->fence.context, | |
1986 | lower_32_bits(rq->fence.seqno), | |
1987 | hwsp_seqno(rq)); | |
1988 | ENGINE_TRACE(engine, | |
1989 | "ctx:{start:%08x, head:%04x, tail:%04x}, ", | |
1990 | regs[CTX_RING_START], | |
1991 | regs[CTX_RING_HEAD], | |
1992 | regs[CTX_RING_TAIL]); | |
c616d238 CW |
1993 | } |
1994 | ||
6f0726b4 | 1995 | *inactive++ = *execlists->active++; |
beecec90 | 1996 | |
22b7a426 CW |
1997 | GEM_BUG_ON(execlists->active - execlists->inflight > |
1998 | execlists_num_ports(execlists)); | |
4af0d727 | 1999 | } |
bc4237ec | 2000 | } while (head != tail); |
e981e7b1 | 2001 | |
d8f50531 MK |
2002 | /* |
2003 | * Gen11 has proven to fail wrt global observation point between | |
2004 | * entry and tail update, failing on the ordering and thus | |
2005 | * we see an old entry in the context status buffer. | |
2006 | * | |
2007 | * Forcibly evict out entries for the next gpu csb update, | |
2008 | * to increase the odds that we get fresh entries with |
2009 | * non-working hardware. The cost for doing so comes out mostly in |
2010 | * the wash as hardware, working or not, will need to do the | |
2011 | * invalidation before. | |
2012 | */ | |
dc040682 | 2013 | drm_clflush_virt_range(&buf[0], num_entries * sizeof(buf[0])); |
6f0726b4 | 2014 | |
4386b8e5 CW |
2015 | /* |
2016 | * We assume that any event reflects a change in context flow | |
2017 | * and merits a fresh timeslice. We reinstall the timer after | |
2018 | * inspecting the queue to see if we need to resubmit. |
2019 | */ | |
bb6287cb | 2020 | if (*prev != *execlists->active) { /* elide lite-restores */ |
dc342156 TU |
2021 | struct intel_context *prev_ce = NULL, *active_ce = NULL; |
2022 | ||
bb6287cb TU |
2023 | /* |
2024 | * Note the inherent discrepancy between the HW runtime, | |
2025 | * recorded as part of the context switch, and the CPU | |
2026 | * adjustment for active contexts. We have to hope that | |
2027 | * the delay in processing the CS event is very small | |
2028 | * and consistent. It works to our advantage to have | |
2029 | * the CPU adjustment _undershoot_ (i.e. start later than) | |
2030 | * the CS timestamp so we never overreport the runtime | |
2031 | * and correct ourselves later when updating from HW. |
2032 | */ | |
2033 | if (*prev) | |
dc342156 | 2034 | prev_ce = (*prev)->context; |
bb6287cb | 2035 | if (*execlists->active) |
dc342156 TU |
2036 | active_ce = (*execlists->active)->context; |
2037 | if (prev_ce != active_ce) { | |
2038 | if (prev_ce) | |
2039 | lrc_runtime_stop(prev_ce); | |
2040 | if (active_ce) | |
2041 | lrc_runtime_start(active_ce); | |
2042 | } | |
4386b8e5 | 2043 | new_timeslice(execlists); |
bb6287cb | 2044 | } |
4386b8e5 | 2045 | |
6f0726b4 CW |
2046 | return inactive; |
2047 | } | |
2048 | ||
2049 | static void post_process_csb(struct i915_request **port, | |
2050 | struct i915_request **last) | |
2051 | { | |
2052 | while (port != last) | |
2053 | execlists_schedule_out(*port++); | |
73377dbc | 2054 | } |
c6a2ac71 | 2055 | |
32ff621f CW |
2056 | static void __execlists_hold(struct i915_request *rq) |
2057 | { | |
2058 | LIST_HEAD(list); | |
2059 | ||
2060 | do { | |
2061 | struct i915_dependency *p; | |
2062 | ||
2063 | if (i915_request_is_active(rq)) | |
2064 | __i915_request_unsubmit(rq); | |
2065 | ||
32ff621f | 2066 | clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); |
349a2bc5 MB |
2067 | list_move_tail(&rq->sched.link, |
2068 | &rq->engine->sched_engine->hold); | |
32ff621f | 2069 | i915_request_set_hold(rq); |
26208d87 | 2070 | RQ_TRACE(rq, "on hold\n"); |
32ff621f | 2071 | |
793c2261 | 2072 | for_each_waiter(p, rq) { |
32ff621f CW |
2073 | struct i915_request *w = |
2074 | container_of(p->waiter, typeof(*w), sched); | |
2075 | ||
aba73826 CW |
2076 | if (p->flags & I915_DEPENDENCY_WEAK) |
2077 | continue; | |
2078 | ||
32ff621f CW |
2079 | /* Leave semaphores spinning on the other engines */ |
2080 | if (w->engine != rq->engine) | |
2081 | continue; | |
2082 | ||
2083 | if (!i915_request_is_ready(w)) | |
2084 | continue; | |
2085 | ||
6f0726b4 | 2086 | if (__i915_request_is_complete(w)) |
32ff621f CW |
2087 | continue; |
2088 | ||
26208d87 | 2089 | if (i915_request_on_hold(w)) |
32ff621f CW |
2090 | continue; |
2091 | ||
2092 | list_move_tail(&w->sched.link, &list); | |
2093 | } | |
2094 | ||
2095 | rq = list_first_entry_or_null(&list, typeof(*rq), sched.link); | |
2096 | } while (rq); | |
2097 | } | |
2098 | ||
4ba5c086 | 2099 | static bool execlists_hold(struct intel_engine_cs *engine, |
32ff621f CW |
2100 | struct i915_request *rq) |
2101 | { | |
b9695405 CW |
2102 | if (i915_request_on_hold(rq)) |
2103 | return false; | |
2104 | ||
349a2bc5 | 2105 | spin_lock_irq(&engine->sched_engine->lock); |
32ff621f | 2106 | |
6f0726b4 | 2107 | if (__i915_request_is_complete(rq)) { /* too late! */ |
4ba5c086 CW |
2108 | rq = NULL; |
2109 | goto unlock; | |
2110 | } | |
2111 | ||
32ff621f CW |
2112 | /* |
2113 | * Transfer this request onto the hold queue to prevent it | |
2114 | * being resubmitted to HW (and potentially completed) before we have |
2115 | * released it. Since we may have already submitted following | |
2116 | * requests, we need to remove those as well. | |
2117 | */ | |
2118 | GEM_BUG_ON(i915_request_on_hold(rq)); | |
2119 | GEM_BUG_ON(rq->engine != engine); | |
2120 | __execlists_hold(rq); | |
349a2bc5 | 2121 | GEM_BUG_ON(list_empty(&engine->sched_engine->hold)); |
32ff621f | 2122 | |
4ba5c086 | 2123 | unlock: |
349a2bc5 | 2124 | spin_unlock_irq(&engine->sched_engine->lock); |
4ba5c086 | 2125 | return rq; |
32ff621f CW |
2126 | } |
2127 | ||
2128 | static bool hold_request(const struct i915_request *rq) | |
2129 | { | |
2130 | struct i915_dependency *p; | |
66940061 | 2131 | bool result = false; |
32ff621f CW |
2132 | |
2133 | /* | |
2134 | * If one of our ancestors is on hold, we must also be on hold, | |
2135 | * otherwise we will bypass it and execute before it. | |
2136 | */ | |
66940061 | 2137 | rcu_read_lock(); |
793c2261 | 2138 | for_each_signaler(p, rq) { |
32ff621f CW |
2139 | const struct i915_request *s = |
2140 | container_of(p->signaler, typeof(*s), sched); | |
2141 | ||
2142 | if (s->engine != rq->engine) | |
2143 | continue; | |
2144 | ||
66940061 CW |
2145 | result = i915_request_on_hold(s); |
2146 | if (result) | |
2147 | break; | |
32ff621f | 2148 | } |
66940061 | 2149 | rcu_read_unlock(); |
32ff621f | 2150 | |
66940061 | 2151 | return result; |
32ff621f CW |
2152 | } |
2153 | ||
2154 | static void __execlists_unhold(struct i915_request *rq) | |
2155 | { | |
2156 | LIST_HEAD(list); | |
2157 | ||
2158 | do { | |
2159 | struct i915_dependency *p; | |
2160 | ||
26208d87 CW |
2161 | RQ_TRACE(rq, "hold release\n"); |
2162 | ||
32ff621f CW |
2163 | GEM_BUG_ON(!i915_request_on_hold(rq)); |
2164 | GEM_BUG_ON(!i915_sw_fence_signaled(&rq->submit)); | |
2165 | ||
2166 | i915_request_clear_hold(rq); | |
2167 | list_move_tail(&rq->sched.link, | |
d2a31d02 | 2168 | i915_sched_lookup_priolist(rq->engine->sched_engine, |
32ff621f CW |
2169 | rq_prio(rq))); |
2170 | set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); | |
32ff621f CW |
2171 | |
2172 | /* Also release any children on this engine that are ready */ | |
793c2261 | 2173 | for_each_waiter(p, rq) { |
32ff621f CW |
2174 | struct i915_request *w = |
2175 | container_of(p->waiter, typeof(*w), sched); | |
2176 | ||
aba73826 CW |
2177 | if (p->flags & I915_DEPENDENCY_WEAK) |
2178 | continue; | |
2179 | ||
32ff621f CW |
2180 | if (w->engine != rq->engine) |
2181 | continue; | |
2182 | ||
26208d87 | 2183 | if (!i915_request_on_hold(w)) |
32ff621f CW |
2184 | continue; |
2185 | ||
2186 | /* Check that no other parents are also on hold */ | |
26208d87 | 2187 | if (hold_request(w)) |
32ff621f CW |
2188 | continue; |
2189 | ||
2190 | list_move_tail(&w->sched.link, &list); | |
2191 | } | |
2192 | ||
2193 | rq = list_first_entry_or_null(&list, typeof(*rq), sched.link); | |
2194 | } while (rq); | |
2195 | } | |
2196 | ||
32ff621f CW |
2197 | static void execlists_unhold(struct intel_engine_cs *engine, |
2198 | struct i915_request *rq) | |
2199 | { | |
349a2bc5 | 2200 | spin_lock_irq(&engine->sched_engine->lock); |
32ff621f CW |
2201 | |
2202 | /* | |
2203 | * Move this request back to the priority queue, and all of its | |
2204 | * children and grandchildren that were suspended along with it. | |
2205 | */ | |
2206 | __execlists_unhold(rq); | |
2207 | ||
3e28d371 MB |
2208 | if (rq_prio(rq) > engine->sched_engine->queue_priority_hint) { |
2209 | engine->sched_engine->queue_priority_hint = rq_prio(rq); | |
22916bad | 2210 | tasklet_hi_schedule(&engine->sched_engine->tasklet); |
32ff621f CW |
2211 | } |
2212 | ||
349a2bc5 | 2213 | spin_unlock_irq(&engine->sched_engine->lock); |
32ff621f CW |
2214 | } |
2215 | ||
74831738 CW |
2216 | struct execlists_capture { |
2217 | struct work_struct work; | |
2218 | struct i915_request *rq; | |
2219 | struct i915_gpu_coredump *error; | |
2220 | }; | |
2221 | ||
2222 | static void execlists_capture_work(struct work_struct *work) | |
2223 | { | |
2224 | struct execlists_capture *cap = container_of(work, typeof(*cap), work); | |
8b91cdd4 TH |
2225 | const gfp_t gfp = __GFP_KSWAPD_RECLAIM | __GFP_RETRY_MAYFAIL | |
2226 | __GFP_NOWARN; | |
74831738 CW |
2227 | struct intel_engine_cs *engine = cap->rq->engine; |
2228 | struct intel_gt_coredump *gt = cap->error->gt; | |
2229 | struct intel_engine_capture_vma *vma; | |
2230 | ||
2231 | /* Compress all the objects attached to the request, slow! */ | |
2232 | vma = intel_engine_coredump_add_request(gt->engine, cap->rq, gfp); | |
2233 | if (vma) { | |
2234 | struct i915_vma_compress *compress = | |
2235 | i915_vma_capture_prepare(gt); | |
2236 | ||
2237 | intel_engine_coredump_add_vma(gt->engine, vma, compress); | |
2238 | i915_vma_capture_finish(gt, compress); | |
2239 | } | |
2240 | ||
2241 | gt->simulated = gt->engine->simulated; | |
2242 | cap->error->simulated = gt->simulated; | |
2243 | ||
2244 | /* Publish the error state, and announce it to the world */ | |
2245 | i915_error_state_store(cap->error); | |
2246 | i915_gpu_coredump_put(cap->error); | |
2247 | ||
2248 | /* Return this request and all that depend upon it for signaling */ | |
2249 | execlists_unhold(engine, cap->rq); | |
4ba5c086 | 2250 | i915_request_put(cap->rq); |
74831738 CW |
2251 | |
2252 | kfree(cap); | |
2253 | } | |
2254 | ||
2255 | static struct execlists_capture *capture_regs(struct intel_engine_cs *engine) | |
2256 | { | |
2257 | const gfp_t gfp = GFP_ATOMIC | __GFP_NOWARN; | |
2258 | struct execlists_capture *cap; | |
2259 | ||
2260 | cap = kmalloc(sizeof(*cap), gfp); | |
2261 | if (!cap) | |
2262 | return NULL; | |
2263 | ||
2264 | cap->error = i915_gpu_coredump_alloc(engine->i915, gfp); | |
2265 | if (!cap->error) | |
2266 | goto err_cap; | |
2267 | ||
a6f0f9cf | 2268 | cap->error->gt = intel_gt_coredump_alloc(engine->gt, gfp, CORE_DUMP_FLAG_NONE); |
74831738 CW |
2269 | if (!cap->error->gt) |
2270 | goto err_gpu; | |
2271 | ||
a6f0f9cf | 2272 | cap->error->gt->engine = intel_engine_coredump_alloc(engine, gfp, CORE_DUMP_FLAG_NONE); |
74831738 CW |
2273 | if (!cap->error->gt->engine) |
2274 | goto err_gt; | |
2275 | ||
bda30024 TU |
2276 | cap->error->gt->engine->hung = true; |
2277 | ||
74831738 CW |
2278 | return cap; |
2279 | ||
2280 | err_gt: | |
2281 | kfree(cap->error->gt); | |
2282 | err_gpu: | |
2283 | kfree(cap->error); | |
2284 | err_cap: | |
2285 | kfree(cap); | |
2286 | return NULL; | |
2287 | } | |
2288 | ||
4c977837 CW |
2289 | static struct i915_request * |
2290 | active_context(struct intel_engine_cs *engine, u32 ccid) | |
2291 | { | |
2292 | const struct intel_engine_execlists * const el = &engine->execlists; | |
2293 | struct i915_request * const *port, *rq; | |
2294 | ||
2295 | /* | |
2296 | * Use the most recent result from process_csb(), but just in case | |
2297 | * we trigger an error (via interrupt) before the first CS event has | |
2298 | * been written, peek at the next submission. | |
2299 | */ | |
2300 | ||
2301 | for (port = el->active; (rq = *port); port++) { | |
2632f174 | 2302 | if (rq->context->lrc.ccid == ccid) { |
4c977837 | 2303 | ENGINE_TRACE(engine, |
6f0726b4 CW |
2304 | "ccid:%x found at active:%zd\n", |
2305 | ccid, port - el->active); | |
4c977837 CW |
2306 | return rq; |
2307 | } | |
2308 | } | |
2309 | ||
2310 | for (port = el->pending; (rq = *port); port++) { | |
2632f174 | 2311 | if (rq->context->lrc.ccid == ccid) { |
4c977837 | 2312 | ENGINE_TRACE(engine, |
6f0726b4 CW |
2313 | "ccid:%x found at pending:%zd\n", |
2314 | ccid, port - el->pending); | |
4c977837 CW |
2315 | return rq; |
2316 | } | |
2317 | } | |
2318 | ||
2319 | ENGINE_TRACE(engine, "ccid:%x not found\n", ccid); | |
2320 | return NULL; | |
2321 | } | |
2322 | ||
2323 | static u32 active_ccid(struct intel_engine_cs *engine) | |
2324 | { | |
2325 | return ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI); | |
2326 | } | |
2327 | ||
2730055d | 2328 | static void execlists_capture(struct intel_engine_cs *engine) |
74831738 | 2329 | { |
848a4e5c | 2330 | struct drm_i915_private *i915 = engine->i915; |
74831738 CW |
2331 | struct execlists_capture *cap; |
2332 | ||
2333 | if (!IS_ENABLED(CONFIG_DRM_I915_CAPTURE_ERROR)) | |
2730055d | 2334 | return; |
74831738 CW |
2335 | |
2336 | /* | |
2337 | * We need to _quickly_ capture the engine state before we reset. | |
2338 | * We are inside an atomic section (softirq) here and we are delaying | |
2339 | * the forced preemption event. | |
2340 | */ | |
2341 | cap = capture_regs(engine); | |
2342 | if (!cap) | |
2730055d | 2343 | return; |
74831738 | 2344 | |
349a2bc5 | 2345 | spin_lock_irq(&engine->sched_engine->lock); |
4c977837 | 2346 | cap->rq = active_context(engine, active_ccid(engine)); |
70a76a9b CW |
2347 | if (cap->rq) { |
2348 | cap->rq = active_request(cap->rq->context->timeline, cap->rq); | |
2349 | cap->rq = i915_request_get_rcu(cap->rq); | |
2350 | } | |
349a2bc5 | 2351 | spin_unlock_irq(&engine->sched_engine->lock); |
4ba5c086 CW |
2352 | if (!cap->rq) |
2353 | goto err_free; | |
74831738 CW |
2354 | |
2355 | /* | |
2356 | * Remove the request from the execlists queue, and take ownership | |
2357 | * of the request. We pass it to our worker who will _slowly_ compress | |
2358 | * all the pages the _user_ requested for debugging their batch, after | |
2359 | * which we return it to the queue for signaling. | |
2360 | * | |
2361 | * By removing them from the execlists queue, we also remove the | |
2362 | * requests from being processed by __unwind_incomplete_requests() | |
2363 | * during the intel_engine_reset(), and so they will *not* be replayed | |
2364 | * afterwards. | |
2365 | * | |
2366 | * Note that because we have not yet reset the engine at this point, | |
2367 | * it is possible that the request we have identified as being |
2368 | * guilty did in fact complete and we will then hit an arbitration |
2369 | * point allowing the outstanding preemption to succeed. The likelihood | |
2370 | * of that is very low (as capturing of the engine registers should be | |
2371 | * fast enough to run inside an irq-off atomic section!), so we will | |
2372 | * simply hold that request accountable for being non-preemptible | |
2373 | * long enough to force the reset. | |
2374 | */ | |
4ba5c086 CW |
2375 | if (!execlists_hold(engine, cap->rq)) |
2376 | goto err_rq; | |
74831738 CW |
2377 | |
2378 | INIT_WORK(&cap->work, execlists_capture_work); | |
848a4e5c | 2379 | queue_work(i915->unordered_wq, &cap->work); |
2730055d | 2380 | return; |
4ba5c086 CW |
2381 | |
2382 | err_rq: | |
2383 | i915_request_put(cap->rq); | |
2384 | err_free: | |
2385 | i915_gpu_coredump_put(cap->error); | |
2386 | kfree(cap); | |
74831738 CW |
2387 | } |
2388 | ||
70a76a9b | 2389 | static void execlists_reset(struct intel_engine_cs *engine, const char *msg) |
3a7a92ab CW |
2390 | { |
2391 | const unsigned int bit = I915_RESET_ENGINE + engine->id; | |
2392 | unsigned long *lock = &engine->gt->reset.flags; | |
2393 | ||
70a76a9b | 2394 | if (!intel_has_reset_engine(engine->gt)) |
3a7a92ab CW |
2395 | return; |
2396 | ||
2397 | if (test_and_set_bit(bit, lock)) | |
2398 | return; | |
2399 | ||
70a76a9b CW |
2400 | ENGINE_TRACE(engine, "reset for %s\n", msg); |
2401 | ||
3a7a92ab | 2402 | /* Mark this tasklet as disabled to avoid waiting for it to complete */ |
22916bad | 2403 | tasklet_disable_nosync(&engine->sched_engine->tasklet); |
3a7a92ab | 2404 | |
74831738 | 2405 | ring_set_paused(engine, 1); /* Freeze the current request in place */ |
2730055d CW |
2406 | execlists_capture(engine); |
2407 | intel_engine_reset(engine, msg); | |
3a7a92ab | 2408 | |
22916bad | 2409 | tasklet_enable(&engine->sched_engine->tasklet); |
3a7a92ab CW |
2410 | clear_and_wake_up_bit(bit, lock); |
2411 | } | |
2412 | ||
2413 | static bool preempt_timeout(const struct intel_engine_cs *const engine) | |
2414 | { | |
2415 | const struct timer_list *t = &engine->execlists.preempt; | |
2416 | ||
2417 | if (!CONFIG_DRM_I915_PREEMPT_TIMEOUT) | |
2418 | return false; | |
2419 | ||
2420 | if (!timer_expired(t)) | |
2421 | return false; | |
2422 | ||
16f2941a | 2423 | return engine->execlists.pending[0]; |
3a7a92ab CW |
2424 | } |
2425 | ||
9512f985 CW |
2426 | /* |
2427 | * Check the unread Context Status Buffers and manage the submission of new | |
2428 | * contexts to the ELSP accordingly. | |
2429 | */ | |
2913fa4d | 2430 | static void execlists_submission_tasklet(struct tasklet_struct *t) |
9512f985 | 2431 | { |
22916bad MB |
2432 | struct i915_sched_engine *sched_engine = |
2433 | from_tasklet(sched_engine, t, tasklet); | |
2434 | struct intel_engine_cs * const engine = sched_engine->private_data; | |
6f0726b4 CW |
2435 | struct i915_request *post[2 * EXECLIST_MAX_PORTS]; |
2436 | struct i915_request **inactive; | |
9512f985 | 2437 | |
6f0726b4 CW |
2438 | rcu_read_lock(); |
2439 | inactive = process_csb(engine, post); | |
2440 | GEM_BUG_ON(inactive - post > ARRAY_SIZE(post)); | |
70a76a9b | 2441 | |
16f2941a | 2442 | if (unlikely(preempt_timeout(engine))) { |
6ef7d362 CW |
2443 | const struct i915_request *rq = *engine->execlists.active; |
2444 | ||
2445 | /* | |
2446 | * If after the preempt-timeout expired, we are still on the | |
2447 | * same active request/context as before we initiated the | |
2448 | * preemption, reset the engine. | |
2449 | * | |
2450 | * However, if we have processed a CS event to switch contexts, | |
2451 | * but not yet processed the CS event for the pending | |
2452 | * preemption, reset the timer allowing the new context to | |
2453 | * gracefully exit. | |
2454 | */ | |
16f2941a | 2455 | cancel_timer(&engine->execlists.preempt); |
6ef7d362 CW |
2456 | if (rq == engine->execlists.preempt_target) |
2457 | engine->execlists.error_interrupt |= ERROR_PREEMPT; | |
2458 | else | |
2459 | set_timer_ms(&engine->execlists.preempt, | |
2460 | active_preempt_timeout(engine, rq)); | |
16f2941a CW |
2461 | } |
2462 | ||
70a76a9b | 2463 | if (unlikely(READ_ONCE(engine->execlists.error_interrupt))) { |
b2295e2e CW |
2464 | const char *msg; |
2465 | ||
2466 | /* Generate the error message in priority wrt the user! */ |
2467 | if (engine->execlists.error_interrupt & GENMASK(15, 0)) | |
2468 | msg = "CS error"; /* thrown by a user payload */ | |
2469 | else if (engine->execlists.error_interrupt & ERROR_CSB) | |
2470 | msg = "invalid CSB event"; | |
16f2941a CW |
2471 | else if (engine->execlists.error_interrupt & ERROR_PREEMPT) |
2472 | msg = "preemption time out"; | |
b2295e2e CW |
2473 | else |
2474 | msg = "internal error"; | |
2475 | ||
70a76a9b | 2476 | engine->execlists.error_interrupt = 0; |
b2295e2e | 2477 | execlists_reset(engine, msg); |
70a76a9b CW |
2478 | } |
2479 | ||
4386b8e5 | 2480 | if (!engine->execlists.pending[0]) { |
16f2941a | 2481 | execlists_dequeue_irq(engine); |
4386b8e5 CW |
2482 | start_timeslice(engine); |
2483 | } | |
6f0726b4 CW |
2484 | |
2485 | post_process_csb(post, inactive); | |
2486 | rcu_read_unlock(); | |
9512f985 CW |
2487 | } |
2488 | ||
0669a6e1 CW |
2489 | static void execlists_irq_handler(struct intel_engine_cs *engine, u16 iir) |
2490 | { | |
2491 | bool tasklet = false; | |
2492 | ||
2493 | if (unlikely(iir & GT_CS_MASTER_ERROR_INTERRUPT)) { | |
2494 | u32 eir; | |
2495 | ||
2496 | /* Upper 16b are the enabling mask, rsvd for internal errors */ | |
2497 | eir = ENGINE_READ(engine, RING_EIR) & GENMASK(15, 0); | |
2498 | ENGINE_TRACE(engine, "CS error: %x\n", eir); | |
2499 | ||
2500 | /* Disable the error interrupt until after the reset */ | |
2501 | if (likely(eir)) { | |
2502 | ENGINE_WRITE(engine, RING_EMR, ~0u); | |
2503 | ENGINE_WRITE(engine, RING_EIR, eir); | |
2504 | WRITE_ONCE(engine->execlists.error_interrupt, eir); | |
2505 | tasklet = true; | |
2506 | } | |
2507 | } | |
2508 | ||
2509 | if (iir & GT_WAIT_SEMAPHORE_INTERRUPT) { | |
2510 | WRITE_ONCE(engine->execlists.yield, | |
2511 | ENGINE_READ_FW(engine, RING_EXECLIST_STATUS_HI)); | |
2512 | ENGINE_TRACE(engine, "semaphore yield: %08x\n", | |
2513 | engine->execlists.yield); | |
2514 | if (del_timer(&engine->execlists.timer)) | |
2515 | tasklet = true; | |
2516 | } | |
2517 | ||
2518 | if (iir & GT_CONTEXT_SWITCH_INTERRUPT) | |
2519 | tasklet = true; | |
2520 | ||
2521 | if (iir & GT_RENDER_USER_INTERRUPT) | |
2522 | intel_engine_signal_breadcrumbs(engine); | |
2523 | ||
2524 | if (tasklet) | |
22916bad | 2525 | tasklet_hi_schedule(&engine->sched_engine->tasklet); |
0669a6e1 CW |
2526 | } |
2527 | ||
3a7a92ab | 2528 | static void __execlists_kick(struct intel_engine_execlists *execlists) |
8ee36e04 | 2529 | { |
22916bad MB |
2530 | struct intel_engine_cs *engine = |
2531 | container_of(execlists, typeof(*engine), execlists); | |
2532 | ||
8ee36e04 | 2533 | /* Kick the tasklet for some interrupt coalescing and reset handling */ |
22916bad | 2534 | tasklet_hi_schedule(&engine->sched_engine->tasklet); |
3a7a92ab CW |
2535 | } |
2536 | ||
2537 | #define execlists_kick(t, member) \ | |
2538 | __execlists_kick(container_of(t, struct intel_engine_execlists, member)) | |
2539 | ||
2540 | static void execlists_timeslice(struct timer_list *timer) | |
2541 | { | |
2542 | execlists_kick(timer, timer); | |
2543 | } | |
2544 | ||
2545 | static void execlists_preempt(struct timer_list *timer) | |
2546 | { | |
2547 | execlists_kick(timer, preempt); | |
8ee36e04 CW |
2548 | } |
2549 | ||
f6322edd | 2550 | static void queue_request(struct intel_engine_cs *engine, |
672c368f | 2551 | struct i915_request *rq) |
27606fd8 | 2552 | { |
672c368f CW |
2553 | GEM_BUG_ON(!list_empty(&rq->sched.link)); |
2554 | list_add_tail(&rq->sched.link, | |
d2a31d02 MB |
2555 | i915_sched_lookup_priolist(engine->sched_engine, |
2556 | rq_prio(rq))); | |
672c368f | 2557 | set_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); |
9512f985 CW |
2558 | } |
2559 | ||
16f2941a | 2560 | static bool submit_queue(struct intel_engine_cs *engine, |
22b7a426 | 2561 | const struct i915_request *rq) |
f6322edd | 2562 | { |
3e28d371 | 2563 | struct i915_sched_engine *sched_engine = engine->sched_engine; |
22b7a426 | 2564 | |
3e28d371 | 2565 | if (rq_prio(rq) <= sched_engine->queue_priority_hint) |
16f2941a | 2566 | return false; |
22b7a426 | 2567 | |
3e28d371 | 2568 | sched_engine->queue_priority_hint = rq_prio(rq); |
16f2941a | 2569 | return true; |
27606fd8 CW |
2570 | } |
2571 | ||
32ff621f CW |
2572 | static bool ancestor_on_hold(const struct intel_engine_cs *engine, |
2573 | const struct i915_request *rq) | |
2574 | { | |
2575 | GEM_BUG_ON(i915_request_on_hold(rq)); | |
349a2bc5 | 2576 | return !list_empty(&engine->sched_engine->hold) && hold_request(rq); |
32ff621f CW |
2577 | } |
2578 | ||
e61e0f51 | 2579 | static void execlists_submit_request(struct i915_request *request) |
acdd884a | 2580 | { |
4a570db5 | 2581 | struct intel_engine_cs *engine = request->engine; |
5590af3e | 2582 | unsigned long flags; |
acdd884a | 2583 | |
663f71e7 | 2584 | /* Will be called from irq-context when using foreign fences. */ |
349a2bc5 | 2585 | spin_lock_irqsave(&engine->sched_engine->lock, flags); |
acdd884a | 2586 | |
32ff621f | 2587 | if (unlikely(ancestor_on_hold(engine, request))) { |
26208d87 | 2588 | RQ_TRACE(request, "ancestor on hold\n"); |
349a2bc5 MB |
2589 | list_add_tail(&request->sched.link, |
2590 | &engine->sched_engine->hold); | |
32ff621f CW |
2591 | i915_request_set_hold(request); |
2592 | } else { | |
2593 | queue_request(engine, request); | |
acdd884a | 2594 | |
074bb195 | 2595 | GEM_BUG_ON(i915_sched_engine_is_empty(engine->sched_engine)); |
32ff621f | 2596 | GEM_BUG_ON(list_empty(&request->sched.link)); |
6c067579 | 2597 | |
16f2941a CW |
2598 | if (submit_queue(engine, request)) |
2599 | __execlists_kick(&engine->execlists); | |
32ff621f | 2600 | } |
9512f985 | 2601 | |
349a2bc5 | 2602 | spin_unlock_irqrestore(&engine->sched_engine->lock, flags); |
acdd884a MT |
2603 | } |
2604 | ||
9c01524d ML |
2605 | static int |
2606 | __execlists_context_pre_pin(struct intel_context *ce, | |
2607 | struct intel_engine_cs *engine, | |
2608 | struct i915_gem_ww_ctx *ww, void **vaddr) | |
2609 | { | |
2610 | int err; | |
2611 | ||
2612 | err = lrc_pre_pin(ce, engine, ww, vaddr); | |
2613 | if (err) | |
2614 | return err; | |
2615 | ||
2616 | if (!__test_and_set_bit(CONTEXT_INIT_BIT, &ce->flags)) { | |
2617 | lrc_init_state(ce, engine, *vaddr); | |
2618 | ||
058d7d62 | 2619 | __i915_gem_object_flush_map(ce->state->obj, 0, engine->context_size); |
9c01524d ML |
2620 | } |
2621 | ||
2622 | return 0; | |
2623 | } | |
2624 | ||
a0d3fdb6 CW |
2625 | static int execlists_context_pre_pin(struct intel_context *ce, |
2626 | struct i915_gem_ww_ctx *ww, | |
2627 | void **vaddr) | |
c4d52feb | 2628 | { |
9c01524d | 2629 | return __execlists_context_pre_pin(ce, ce->engine, ww, vaddr); |
685d2109 MK |
2630 | } |
2631 | ||
a0d3fdb6 | 2632 | static int execlists_context_pin(struct intel_context *ce, void *vaddr) |
685d2109 | 2633 | { |
a0d3fdb6 | 2634 | return lrc_pin(ce, ce->engine, vaddr); |
685d2109 MK |
2635 | } |
2636 | ||
4c60b1aa CW |
2637 | static int execlists_context_alloc(struct intel_context *ce) |
2638 | { | |
093a0bea | 2639 | return lrc_alloc(ce, ce->engine); |
4c60b1aa CW |
2640 | } |
2641 | ||
62eaf0ae MB |
2642 | static void execlists_context_cancel_request(struct intel_context *ce, |
2643 | struct i915_request *rq) | |
2644 | { | |
2645 | struct intel_engine_cs *engine = NULL; | |
2646 | ||
2647 | i915_request_active_engine(rq, &engine); | |
2648 | ||
2649 | if (engine && intel_engine_pulse(engine)) | |
2650 | intel_gt_handle_error(engine->gt, engine->mask, 0, | |
2651 | "request cancellation by %s", | |
2652 | current->comm); | |
2653 | } | |
2654 | ||
a88afcfa MB |
2655 | static struct intel_context * |
2656 | execlists_create_parallel(struct intel_engine_cs **engines, | |
2657 | unsigned int num_siblings, | |
2658 | unsigned int width) | |
2659 | { | |
2660 | struct intel_context *parent = NULL, *ce, *err; | |
2661 | int i; | |
2662 | ||
2663 | GEM_BUG_ON(num_siblings != 1); | |
2664 | ||
2665 | for (i = 0; i < width; ++i) { | |
2666 | ce = intel_context_create(engines[i]); | |
2667 | if (IS_ERR(ce)) { | |
2668 | err = ce; | |
2669 | goto unwind; | |
2670 | } | |
2671 | ||
2672 | if (i == 0) | |
2673 | parent = ce; | |
2674 | else | |
2675 | intel_context_bind_parent_child(parent, ce); | |
2676 | } | |
2677 | ||
2678 | parent->parallel.fence_context = dma_fence_context_alloc(1); | |
2679 | ||
2680 | intel_context_set_nopreempt(parent); | |
2681 | for_each_child(parent, ce) | |
2682 | intel_context_set_nopreempt(ce); | |
2683 | ||
2684 | return parent; | |
2685 | ||
2686 | unwind: | |
2687 | if (parent) | |
2688 | intel_context_put(parent); | |
2689 | return err; | |
2690 | } | |
2691 | ||
4dc84b77 | 2692 | static const struct intel_context_ops execlists_context_ops = { |
bb6287cb | 2693 | .flags = COPS_HAS_INFLIGHT | COPS_RUNTIME_CYCLES, |
cc1557ca | 2694 | |
4c60b1aa CW |
2695 | .alloc = execlists_context_alloc, |
2696 | ||
62eaf0ae MB |
2697 | .cancel_request = execlists_context_cancel_request, |
2698 | ||
3999a708 | 2699 | .pre_pin = execlists_context_pre_pin, |
95f697eb | 2700 | .pin = execlists_context_pin, |
a0d3fdb6 CW |
2701 | .unpin = lrc_unpin, |
2702 | .post_unpin = lrc_post_unpin, | |
9726920b | 2703 | |
6eee33e8 CW |
2704 | .enter = intel_context_enter_engine, |
2705 | .exit = intel_context_exit_engine, | |
2706 | ||
a0d3fdb6 CW |
2707 | .reset = lrc_reset, |
2708 | .destroy = lrc_destroy, | |
55612025 | 2709 | |
a88afcfa | 2710 | .create_parallel = execlists_create_parallel, |
55612025 | 2711 | .create_virtual = execlists_create_virtual, |
4dc84b77 CW |
2712 | }; |
2713 | ||
1c8ee8b9 CW |
2714 | static int emit_pdps(struct i915_request *rq) |
2715 | { | |
2716 | const struct intel_engine_cs * const engine = rq->engine; | |
2717 | struct i915_ppgtt * const ppgtt = i915_vm_to_ppgtt(rq->context->vm); | |
2718 | int err, i; | |
2719 | u32 *cs; | |
2720 | ||
d3f23ab9 | 2721 | GEM_BUG_ON(intel_vgpu_active(rq->i915)); |
1c8ee8b9 CW |
2722 | |
2723 | /* | |
2724 | * Beware ye of the dragons, this sequence is magic! | |
2725 | * | |
2726 | * Small changes to this sequence can cause anything from | |
2727 | * GPU hangs to forcewake errors and machine lockups! | |
2728 | */ | |
2729 | ||
0da3f250 CW |
2730 | cs = intel_ring_begin(rq, 2); |
2731 | if (IS_ERR(cs)) | |
2732 | return PTR_ERR(cs); | |
2733 | ||
2734 | *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE; | |
2735 | *cs++ = MI_NOOP; | |
2736 | intel_ring_advance(rq, cs); | |
2737 | ||
1c8ee8b9 CW |
2738 | /* Flush any residual operations from the context load */ |
2739 | err = engine->emit_flush(rq, EMIT_FLUSH); | |
2740 | if (err) | |
2741 | return err; | |
2742 | ||
2743 | /* Magic required to prevent forcewake errors! */ | |
2744 | err = engine->emit_flush(rq, EMIT_INVALIDATE); | |
2745 | if (err) | |
2746 | return err; | |
2747 | ||
2748 | cs = intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2); | |
2749 | if (IS_ERR(cs)) | |
2750 | return PTR_ERR(cs); | |
2751 | ||
2752 | /* Ensure the LRI have landed before we invalidate & continue */ | |
2753 | *cs++ = MI_LOAD_REGISTER_IMM(2 * GEN8_3LVL_PDPES) | MI_LRI_FORCE_POSTED; | |
2754 | for (i = GEN8_3LVL_PDPES; i--; ) { | |
2755 | const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i); | |
2756 | u32 base = engine->mmio_base; | |
2757 | ||
2758 | *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(base, i)); | |
2759 | *cs++ = upper_32_bits(pd_daddr); | |
2760 | *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(base, i)); | |
2761 | *cs++ = lower_32_bits(pd_daddr); | |
2762 | } | |
0da3f250 CW |
2763 | *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE; |
2764 | intel_ring_advance(rq, cs); | |
1c8ee8b9 CW |
2765 | |
2766 | intel_ring_advance(rq, cs); | |
2767 | ||
2768 | return 0; | |
2769 | } | |
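/*
 * Ring-space arithmetic for emit_pdps() above (illustrative): each of the
 * GEN8_3LVL_PDPES page-directory pointers takes four LRI payload dwords
 * (register offset plus value for the UDW, and again for the LDW), plus
 * one dword for the MI_LOAD_REGISTER_IMM header and one for re-enabling
 * arbitration, which is where intel_ring_begin(rq, 4 * GEN8_3LVL_PDPES + 2)
 * comes from.
 */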
2770 | ||
e61e0f51 | 2771 | static int execlists_request_alloc(struct i915_request *request) |
ef11c01d | 2772 | { |
fd138212 | 2773 | int ret; |
ef11c01d | 2774 | |
9f3ccd40 | 2775 | GEM_BUG_ON(!intel_context_is_pinned(request->context)); |
e8a9c58f | 2776 | |
5f5800a7 CW |
2777 | /* |
2778 | * Flush enough space to reduce the likelihood of waiting after | |
ef11c01d CW |
2779 | * we start building the request - in which case we will just |
2780 | * have to repeat work. | |
2781 | */ | |
2782 | request->reserved_space += EXECLISTS_REQUEST_SIZE; | |
2783 | ||
5f5800a7 CW |
2784 | /* |
2785 | * Note that after this point, we have committed to using | |
ef11c01d CW |
2786 | * this request as it is being used to both track the |
2787 | * state of engine initialisation and liveness of the | |
2788 | * golden renderstate above. Think twice before you try | |
2789 | * to cancel/unwind this request now. | |
2790 | */ | |
2791 | ||
1c8ee8b9 CW |
2792 | if (!i915_vm_is_4lvl(request->context->vm)) { |
2793 | ret = emit_pdps(request); | |
2794 | if (ret) | |
2795 | return ret; | |
2796 | } | |
2797 | ||
e8894267 | 2798 | /* Unconditionally invalidate GPU caches and TLBs. */ |
0b718ba1 | 2799 | ret = request->engine->emit_flush(request, EMIT_INVALIDATE); |
e8894267 CW |
2800 | if (ret) |
2801 | return ret; | |
2802 | ||
ef11c01d CW |
2803 | request->reserved_space -= EXECLISTS_REQUEST_SIZE; |
2804 | return 0; | |
ef11c01d CW |
2805 | } |
2806 | ||
23122a4d CW |
2807 | static void reset_csb_pointers(struct intel_engine_cs *engine) |
2808 | { | |
2809 | struct intel_engine_execlists * const execlists = &engine->execlists; | |
2810 | const unsigned int reset_value = execlists->csb_size - 1; | |
2811 | ||
2812 | ring_set_paused(engine, 0); | |
2813 | ||
b428d570 CW |
2814 | /* |
2815 | * Sometimes Icelake forgets to reset its pointers on a GPU reset. | |
2816 | * Bludgeon them with a mmio update to be sure. | |
2817 | */ | |
2818 | ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR, | |
2819 | 0xffff << 16 | reset_value << 8 | reset_value); | |
2820 | ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR); | |
2821 | ||
23122a4d CW |
2822 | /* |
2823 | * After a reset, the HW starts writing into CSB entry [0]. We | |
2824 | * therefore have to set our HEAD pointer back one entry so that | |
2825 | * the *first* entry we check is entry 0. To complicate this further, | |
2826 | * as we don't wait for the first interrupt after reset, we have to | |
2827 | * fake the HW write to point back to the last entry so that our | |
2828 | * inline comparison of our cached head position against the last HW | |
2829 | * write works even before the first interrupt. | |
2830 | */ | |
2831 | execlists->csb_head = reset_value; | |
2832 | WRITE_ONCE(*execlists->csb_write, reset_value); | |
2833 | wmb(); /* Make sure this is visible to HW (paranoia?) */ | |
2834 | ||
233c1ae3 CW |
2835 | /* Check that the GPU does indeed update the CSB entries! */ |
2836 | memset(execlists->csb_status, -1, (reset_value + 1) * sizeof(u64)); | |
dc040682 MC |
2837 | drm_clflush_virt_range(execlists->csb_status, |
2838 | execlists->csb_size * | |
2839 | sizeof(execlists->csb_status)); | |
b428d570 CW |
2840 | |
2841 | /* Once more for luck and our trusty paranoia */ | |
23122a4d | 2842 | ENGINE_WRITE(engine, RING_CONTEXT_STATUS_PTR, |
b428d570 | 2843 | 0xffff << 16 | reset_value << 8 | reset_value); |
23122a4d CW |
2844 | ENGINE_POSTING_READ(engine, RING_CONTEXT_STATUS_PTR); |
2845 | ||
b428d570 | 2846 | GEM_BUG_ON(READ_ONCE(*execlists->csb_write) != reset_value); |
23122a4d CW |
2847 | } |
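/*
 * Minimal sketch (an assumption, not the exact driver code) of the consumer
 * that reset_csb_pointers() primes: process_csb() advances csb_head towards
 * *csb_write, wrapping at csb_size, so parking both at csb_size - 1 makes
 * the first post-reset increment land on entry 0, where the HW resumes
 * writing:
 *
 *	head = execlists->csb_head;
 *	tail = READ_ONCE(*execlists->csb_write);
 *	while (head != tail) {
 *		if (++head == execlists->csb_size)
 *			head = 0;
 *		... consume execlists->csb_status[head] ...
 *	}
 *	execlists->csb_head = head;
 */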
2848 | ||
b436a5f8 CW |
2849 | static void sanitize_hwsp(struct intel_engine_cs *engine) |
2850 | { | |
2851 | struct intel_timeline *tl; | |
2852 | ||
2853 | list_for_each_entry(tl, &engine->status_page.timelines, engine_link) | |
2854 | intel_timeline_reset_seqno(tl); | |
2855 | } | |
2856 | ||
23122a4d CW |
2857 | static void execlists_sanitize(struct intel_engine_cs *engine) |
2858 | { | |
89db9537 CW |
2859 | GEM_BUG_ON(execlists_active(&engine->execlists)); |
2860 | ||
bd3ec9e7 CW |
2861 | /* |
2862 | * Poison residual state on resume, in case the suspend didn't! | |
2863 | * | |
2864 | * We have to assume that across suspend/resume (or other loss | |
2865 | * of control) the contents of our pinned buffers have been | |
2866 | * lost, replaced by garbage. Since this doesn't always happen, | |
2867 | * let's poison such state so that we more quickly spot when | |
2868 | * we falsely assume it has been preserved. | |
2869 | */ | |
2870 | if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) | |
2871 | memset(engine->status_page.addr, POISON_INUSE, PAGE_SIZE); | |
2872 | ||
23122a4d | 2873 | reset_csb_pointers(engine); |
bd3ec9e7 CW |
2874 | |
2875 | /* | |
2876 | * The kernel_context HWSP is stored in the status_page. As above, | |
2877 | * that may be lost on resume/initialisation, and so we need to | |
2878 | * reset the value in the HWSP. | |
2879 | */ | |
b436a5f8 | 2880 | sanitize_hwsp(engine); |
4243cd53 CW |
2881 | |
2882 | /* And scrub the dirty cachelines for the HWSP */ | |
61c5ed94 | 2883 | drm_clflush_virt_range(engine->status_page.addr, PAGE_SIZE); |
3e42cc61 TH |
2884 | |
2885 | intel_engine_reset_pinned_contexts(engine); | |
23122a4d CW |
2886 | } |
2887 | ||
70a76a9b CW |
2888 | static void enable_error_interrupt(struct intel_engine_cs *engine) |
2889 | { | |
2890 | u32 status; | |
2891 | ||
2892 | engine->execlists.error_interrupt = 0; | |
2893 | ENGINE_WRITE(engine, RING_EMR, ~0u); | |
2894 | ENGINE_WRITE(engine, RING_EIR, ~0u); /* clear all existing errors */ | |
2895 | ||
2896 | status = ENGINE_READ(engine, RING_ESR); | |
2897 | if (unlikely(status)) { | |
dc483ba5 | 2898 | drm_err(&engine->i915->drm, |
70a76a9b CW |
2899 | "engine '%s' resumed still in error: %08x\n", |
2900 | engine->name, status); | |
2901 | __intel_gt_reset(engine->gt, engine->mask); | |
2902 | } | |
2903 | ||
2904 | /* | |
2905 | * On current gen8+, we have 2 signals to play with | |
2906 | * | |
2907 | * - I915_ERROR_INSTRUCTION (bit 0) | |
2908 | * | |
2909 | * Generate an error if the command parser encounters an invalid | |
2910 | * instruction | |
2911 | * | |
2912 | * This is a fatal error. | |
2913 | * | |
2914 | * - CP_PRIV (bit 2) | |
2915 | * | |
2916 | * Generate an error on privilege violation (where the CP replaces | |
2917 | * the instruction with a no-op). This also fires for writes into | |
2918 | * read-only scratch pages. | |
2919 | * | |
2920 | * This is a non-fatal error, parsing continues. | |
2921 | * | |
2922 | * * there are a few others defined for odd HW that we do not use | |
2923 | * | |
2924 | * Since CP_PRIV fires for cases where we have chosen to ignore the | |
2925 | * error (as the HW is validating and suppressing the mistakes), we | |
2926 | * only unmask the instruction error bit. | |
2927 | */ | |
2928 | ENGINE_WRITE(engine, RING_EMR, ~I915_ERROR_INSTRUCTION); | |
2929 | } | |
2930 | ||
f3c9d407 | 2931 | static void enable_execlists(struct intel_engine_cs *engine) |
9b1136d5 | 2932 | { |
313443b1 CW |
2933 | u32 mode; |
2934 | ||
2935 | assert_forcewakes_active(engine->uncore, FORCEWAKE_ALL); | |
2936 | ||
060f2322 | 2937 | intel_engine_set_hwsp_writemask(engine, ~0u); /* HWSTAM */ |
225701fc | 2938 | |
c816723b | 2939 | if (GRAPHICS_VER(engine->i915) >= 11) |
313443b1 | 2940 | mode = _MASKED_BIT_ENABLE(GEN11_GFX_DISABLE_LEGACY_MODE); |
225701fc | 2941 | else |
313443b1 CW |
2942 | mode = _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE); |
2943 | ENGINE_WRITE_FW(engine, RING_MODE_GEN7, mode); | |
225701fc | 2944 | |
313443b1 | 2945 | ENGINE_WRITE_FW(engine, RING_MI_MODE, _MASKED_BIT_DISABLE(STOP_RING)); |
9a4dc803 | 2946 | |
313443b1 CW |
2947 | ENGINE_WRITE_FW(engine, |
2948 | RING_HWS_PGA, | |
2949 | i915_ggtt_offset(engine->status_page.vma)); | |
dbc65183 | 2950 | ENGINE_POSTING_READ(engine, RING_HWS_PGA); |
7b02b23e | 2951 | |
70a76a9b | 2952 | enable_error_interrupt(engine); |
f3c9d407 CW |
2953 | } |
2954 | ||
79ffac85 | 2955 | static int execlists_resume(struct intel_engine_cs *engine) |
f3c9d407 | 2956 | { |
805615da | 2957 | intel_mocs_init_engine(engine); |
b3786b29 | 2958 | intel_breadcrumbs_reset(engine->breadcrumbs); |
821ed7df | 2959 | |
f3c9d407 | 2960 | enable_execlists(engine); |
9b1136d5 | 2961 | |
f9576e36 | 2962 | if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE) |
87cb6d80 MR |
2963 | xehp_enable_ccs_engines(engine); |
2964 | ||
821ed7df | 2965 | return 0; |
9b1136d5 OM |
2966 | } |
2967 | ||
eb8d0f5a | 2968 | static void execlists_reset_prepare(struct intel_engine_cs *engine) |
5adfb772 | 2969 | { |
639f2f24 | 2970 | ENGINE_TRACE(engine, "depth<-%d\n", |
22916bad | 2971 | atomic_read(&engine->sched_engine->tasklet.count)); |
5adfb772 CW |
2972 | |
2973 | /* | |
2974 | * Prevent request submission to the hardware until we have | |
2975 | * completed the reset in i915_gem_reset_finish(). If a request | |
2976 | * is completed by one engine, it may then queue a request | |
2977 | * to a second via its execlists->tasklet *just* as we are | |
79ffac85 | 2978 | * calling engine->resume() and also writing the ELSP. |
5adfb772 CW |
2979 | * Turning off the execlists->tasklet until the reset is over |
2980 | * prevents the race. | |
2981 | */ | |
22916bad MB |
2982 | __tasklet_disable_sync_once(&engine->sched_engine->tasklet); |
2983 | GEM_BUG_ON(!reset_in_progress(engine)); | |
5adfb772 | 2984 | |
c30d5dc6 CW |
2985 | /* |
2986 | * We stop engines, otherwise we might get failed reset and a | |
2987 | * dead gpu (on elk). Also as modern gpu as kbl can suffer | |
2988 | * from system hang if batchbuffer is progressing when | |
2989 | * the reset is issued, regardless of READY_TO_RESET ack. | |
2990 | * Thus assume it is best to stop engines on all gens | |
2991 | * where we have a gpu reset. | |
2992 | * | |
2993 | * WaKBLVECSSemaphoreWaitPoll:kbl (on ALL_ENGINES) | |
2994 | * | |
2995 | * FIXME: Wa for more modern gens needs to be validated | |
2996 | */ | |
91715555 | 2997 | ring_set_paused(engine, 1); |
c30d5dc6 | 2998 | intel_engine_stop_cs(engine); |
b68be5c6 | 2999 | |
0667429c | 3000 | /* |
41bb543f | 3001 | * Wa_22011802037: In addition to stopping the cs, we need |
0667429c UNR |
3002 | * to wait for any pending mi force wakeups |
3003 | */ | |
41bb543f MR |
3004 | if (IS_MTL_GRAPHICS_STEP(engine->i915, M, STEP_A0, STEP_B0) || |
3005 | (GRAPHICS_VER(engine->i915) >= 11 && | |
3006 | GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 70))) | |
0667429c UNR |
3007 | intel_engine_wait_for_pending_mi_fw(engine); |
3008 | ||
b68be5c6 | 3009 | engine->execlists.reset_ccid = active_ccid(engine); |
5adfb772 CW |
3010 | } |
3011 | ||
6f0726b4 CW |
3012 | static struct i915_request ** |
3013 | reset_csb(struct intel_engine_cs *engine, struct i915_request **inactive) | |
821ed7df | 3014 | { |
b620e870 | 3015 | struct intel_engine_execlists * const execlists = &engine->execlists; |
cdb6ded4 | 3016 | |
92b0cba4 MC |
3017 | drm_clflush_virt_range(execlists->csb_write, |
3018 | sizeof(execlists->csb_write[0])); | |
582a6f90 | 3019 | |
6f0726b4 | 3020 | inactive = process_csb(engine, inactive); /* drain preemption events */ |
1863e302 CW |
3021 | |
3022 | /* Following the reset, we need to reload the CSB read/write pointers */ | |
22b7a426 | 3023 | reset_csb_pointers(engine); |
1863e302 | 3024 | |
6f0726b4 CW |
3025 | return inactive; |
3026 | } | |
3027 | ||
3028 | static void | |
3029 | execlists_reset_active(struct intel_engine_cs *engine, bool stalled) | |
3030 | { | |
3031 | struct intel_context *ce; | |
3032 | struct i915_request *rq; | |
3033 | u32 head; | |
3034 | ||
1863e302 CW |
3035 | /* |
3036 | * Save the currently executing context, even if we completed | |
3037 | * its request, it was still running at the time of the | |
3038 | * reset and will have been clobbered. | |
3039 | */ | |
b68be5c6 | 3040 | rq = active_context(engine, engine->execlists.reset_ccid); |
22b7a426 | 3041 | if (!rq) |
6f0726b4 | 3042 | return; |
1863e302 | 3043 | |
9f3ccd40 | 3044 | ce = rq->context; |
22b7a426 | 3045 | GEM_BUG_ON(!i915_vma_is_pinned(ce->state)); |
dffa8feb | 3046 | |
6f0726b4 | 3047 | if (__i915_request_is_complete(rq)) { |
bd9bec5b | 3048 | /* Idle context; tidy up the ring so we can restart afresh */ |
42827350 | 3049 | head = intel_ring_wrap(ce->ring, rq->tail); |
1863e302 | 3050 | goto out_replay; |
22b7a426 CW |
3051 | } |
3052 | ||
d3b03d8b CW |
3053 | /* We still have requests in-flight; the engine should be active */ |
3054 | GEM_BUG_ON(!intel_engine_pm_is_awake(engine)); | |
3055 | ||
bd9bec5b CW |
3056 | /* Context has requests still in-flight; it should not be idle! */ |
3057 | GEM_BUG_ON(i915_active_is_idle(&ce->active)); | |
d3b03d8b | 3058 | |
a7f328fc | 3059 | rq = active_request(ce->timeline, rq); |
42827350 CW |
3060 | head = intel_ring_wrap(ce->ring, rq->head); |
3061 | GEM_BUG_ON(head == ce->ring->tail); | |
1863e302 | 3062 | |
21182b3c CW |
3063 | /* |
3064 | * If this request hasn't started yet, e.g. it is waiting on a | |
3065 | * semaphore, we need to avoid skipping the request or else we | |
3066 | * break the signaling chain. However, if the context is corrupt | |
3067 | * the request will not restart and we will be stuck with a wedged | |
3068 | * device. It is quite often the case that if we issue a reset | |
3069 | * while the GPU is loading the context image, the context | |
3070 | * image becomes corrupt. | |
3071 | * | |
3072 | * Otherwise, if we have not started yet, the request should replay | |
3073 | * perfectly and we do not need to flag the result as being erroneous. | |
3074 | */ | |
6f0726b4 | 3075 | if (!__i915_request_has_started(rq)) |
1863e302 | 3076 | goto out_replay; |
21182b3c | 3077 | |
a3e38836 CW |
3078 | /* |
3079 | * If the request was innocent, we leave the request in the ELSP | |
c0dcb203 CW |
3080 | * and will try to replay it on restarting. The context image may |
3081 | * have been corrupted by the reset, in which case we may have | |
3082 | * to service a new GPU hang, but more likely we can continue on | |
3083 | * without impact. | |
3084 | * | |
3085 | * If the request was guilty, we presume the context is corrupt | |
3086 | * and have to at least restore the RING register in the context | |
3087 | * image back to the expected values to skip over the guilty request. | |
3088 | */ | |
cb823ed9 | 3089 | __i915_request_reset(rq, stalled); |
821ed7df | 3090 | |
a3e38836 CW |
3091 | /* |
3092 | * We want a simple context + ring to execute the breadcrumb update. | |
a3aabe86 CW |
3093 | * We cannot rely on the context being intact across the GPU hang, |
3094 | * so clear it and rebuild just what we need for the breadcrumb. | |
3095 | * All pending requests for this context will be zapped, and any | |
3096 | * future request will be after userspace has had the opportunity | |
3097 | * to recreate its own state. | |
3098 | */ | |
1863e302 | 3099 | out_replay: |
639f2f24 | 3100 | ENGINE_TRACE(engine, "replay {head:%04x, tail:%04x}\n", |
42827350 | 3101 | head, ce->ring->tail); |
a0d3fdb6 CW |
3102 | lrc_reset_regs(ce, engine); |
3103 | ce->lrc.lrca = lrc_update_regs(ce, engine, head); | |
6f0726b4 | 3104 | } |
1863e302 | 3105 | |
6f0726b4 CW |
3106 | static void execlists_reset_csb(struct intel_engine_cs *engine, bool stalled) |
3107 | { | |
3108 | struct intel_engine_execlists * const execlists = &engine->execlists; | |
3109 | struct i915_request *post[2 * EXECLIST_MAX_PORTS]; | |
3110 | struct i915_request **inactive; | |
3111 | ||
3112 | rcu_read_lock(); | |
3113 | inactive = reset_csb(engine, post); | |
3114 | ||
3115 | execlists_reset_active(engine, true); | |
3116 | ||
3117 | inactive = cancel_port_requests(execlists, inactive); | |
3118 | post_process_csb(post, inactive); | |
3119 | rcu_read_unlock(); | |
1863e302 | 3120 | } |
8e525cb4 | 3121 | |
e26b6d43 | 3122 | static void execlists_reset_rewind(struct intel_engine_cs *engine, bool stalled) |
1863e302 CW |
3123 | { |
3124 | unsigned long flags; | |
3125 | ||
639f2f24 | 3126 | ENGINE_TRACE(engine, "\n"); |
1863e302 | 3127 | |
6f0726b4 CW |
3128 | /* Process the csb, find the guilty context and throw away */ |
3129 | execlists_reset_csb(engine, stalled); | |
1863e302 | 3130 | |
6f0726b4 CW |
3131 | /* Push back any incomplete requests for replay after the reset. */ |
3132 | rcu_read_lock(); | |
349a2bc5 | 3133 | spin_lock_irqsave(&engine->sched_engine->lock, flags); |
6f0726b4 | 3134 | __unwind_incomplete_requests(engine); |
349a2bc5 | 3135 | spin_unlock_irqrestore(&engine->sched_engine->lock, flags); |
6f0726b4 | 3136 | rcu_read_unlock(); |
1863e302 CW |
3137 | } |
3138 | ||
2913fa4d | 3139 | static void nop_submission_tasklet(struct tasklet_struct *t) |
1863e302 | 3140 | { |
22916bad MB |
3141 | struct i915_sched_engine *sched_engine = |
3142 | from_tasklet(sched_engine, t, tasklet); | |
3143 | struct intel_engine_cs * const engine = sched_engine->private_data; | |
3fc28d3e | 3144 | |
1863e302 | 3145 | /* The driver is wedged; don't process any more events. */ |
3e28d371 | 3146 | WRITE_ONCE(engine->sched_engine->queue_priority_hint, INT_MIN); |
1863e302 CW |
3147 | } |
3148 | ||
e26b6d43 | 3149 | static void execlists_reset_cancel(struct intel_engine_cs *engine) |
1863e302 CW |
3150 | { |
3151 | struct intel_engine_execlists * const execlists = &engine->execlists; | |
3e28d371 | 3152 | struct i915_sched_engine * const sched_engine = engine->sched_engine; |
1863e302 CW |
3153 | struct i915_request *rq, *rn; |
3154 | struct rb_node *rb; | |
3155 | unsigned long flags; | |
3156 | ||
639f2f24 | 3157 | ENGINE_TRACE(engine, "\n"); |
1863e302 CW |
3158 | |
3159 | /* | |
3160 | * Before we call engine->cancel_requests(), we should have exclusive | |
3161 | * access to the submission state. This is arranged for us by the | |
3162 | * caller disabling the interrupt generation, the tasklet and other | |
3163 | * threads that may then access the same state, giving us a free hand | |
3164 | * to reset state. However, we still need to let lockdep be aware that | |
3165 | * we know this state may be accessed in hardirq context, so we | |
3166 | * disable the irq around this manipulation and we want to keep | |
3167 | * the spinlock focused on its duties and not accidentally conflate | |
3168 | * coverage to the submission's irq state. (Similarly, although we | |
3169 | * shouldn't need to disable irq around the manipulation of the | |
3170 | * submission's irq state, we also wish to remind ourselves that | |
3171 | * it is irq state.) | |
3172 | */ | |
6f0726b4 | 3173 | execlists_reset_csb(engine, true); |
1863e302 | 3174 | |
6f0726b4 | 3175 | rcu_read_lock(); |
349a2bc5 | 3176 | spin_lock_irqsave(&engine->sched_engine->lock, flags); |
1863e302 CW |
3177 | |
3178 | /* Mark all executing requests as skipped. */ | |
349a2bc5 | 3179 | list_for_each_entry(rq, &engine->sched_engine->requests, sched.link) |
c10e4a79 | 3180 | i915_request_put(i915_request_mark_eio(rq)); |
5e39b4d9 | 3181 | intel_engine_signal_breadcrumbs(engine); |
1863e302 CW |
3182 | |
3183 | /* Flush the queued requests to the timeline list (for retiring). */ | |
3e28d371 | 3184 | while ((rb = rb_first_cached(&sched_engine->queue))) { |
1863e302 | 3185 | struct i915_priolist *p = to_priolist(rb); |
1863e302 | 3186 | |
2867ff6c | 3187 | priolist_for_each_request_consume(rq, rn, p) { |
c10e4a79 CW |
3188 | if (i915_request_mark_eio(rq)) { |
3189 | __i915_request_submit(rq); | |
3190 | i915_request_put(rq); | |
3191 | } | |
1863e302 CW |
3192 | } |
3193 | ||
3e28d371 | 3194 | rb_erase_cached(&p->node, &sched_engine->queue); |
1863e302 CW |
3195 | i915_priolist_free(p); |
3196 | } | |
3197 | ||
32ff621f | 3198 | /* On-hold requests will be flushed to timeline upon their release */ |
349a2bc5 | 3199 | list_for_each_entry(rq, &sched_engine->hold, sched.link) |
c10e4a79 | 3200 | i915_request_put(i915_request_mark_eio(rq)); |
32ff621f | 3201 | |
6d06779e CW |
3202 | /* Cancel all attached virtual engines */ |
3203 | while ((rb = rb_first_cached(&execlists->virtual))) { | |
3204 | struct virtual_engine *ve = | |
3205 | rb_entry(rb, typeof(*ve), nodes[engine->id].rb); | |
3206 | ||
3207 | rb_erase_cached(rb, &execlists->virtual); | |
3208 | RB_CLEAR_NODE(rb); | |
3209 | ||
349a2bc5 | 3210 | spin_lock(&ve->base.sched_engine->lock); |
0d7cf7bc CW |
3211 | rq = fetch_and_zero(&ve->request); |
3212 | if (rq) { | |
c10e4a79 CW |
3213 | if (i915_request_mark_eio(rq)) { |
3214 | rq->engine = engine; | |
3215 | __i915_request_submit(rq); | |
3216 | i915_request_put(rq); | |
3217 | } | |
b647c7df | 3218 | i915_request_put(rq); |
0d7cf7bc | 3219 | |
3e28d371 | 3220 | ve->base.sched_engine->queue_priority_hint = INT_MIN; |
6d06779e | 3221 | } |
349a2bc5 | 3222 | spin_unlock(&ve->base.sched_engine->lock); |
6d06779e CW |
3223 | } |
3224 | ||
1863e302 CW |
3225 | /* Remaining _unready_ requests will be nop'ed when submitted */ |
3226 | ||
3e28d371 MB |
3227 | sched_engine->queue_priority_hint = INT_MIN; |
3228 | sched_engine->queue = RB_ROOT_CACHED; | |
1863e302 | 3229 | |
22916bad MB |
3230 | GEM_BUG_ON(__tasklet_is_enabled(&engine->sched_engine->tasklet)); |
3231 | engine->sched_engine->tasklet.callback = nop_submission_tasklet; | |
8e525cb4 | 3232 | |
349a2bc5 | 3233 | spin_unlock_irqrestore(&engine->sched_engine->lock, flags); |
6f0726b4 | 3234 | rcu_read_unlock(); |
821ed7df CW |
3235 | } |
3236 | ||
5adfb772 CW |
3237 | static void execlists_reset_finish(struct intel_engine_cs *engine) |
3238 | { | |
5db1d4ea CW |
3239 | struct intel_engine_execlists * const execlists = &engine->execlists; |
3240 | ||
fe25f304 | 3241 | /* |
9e4fa012 CW |
3242 | * After a GPU reset, we may have requests to replay. Do so now while |
3243 | * we still have the forcewake to be sure that the GPU is not allowed | |
3244 | * to sleep before we restart and reload a context. | |
0a7d355e CW |
3245 | * |
3246 | * If the GPU reset fails, the engine may still be alive with requests | |
3247 | * inflight. We expect those to complete, or for the device to be | |
3248 | * reset as the next level of recovery, and as a final resort we | |
3249 | * will declare the device wedged. | |
fe25f304 | 3250 | */ |
22916bad | 3251 | GEM_BUG_ON(!reset_in_progress(engine)); |
5adfb772 | 3252 | |
16f2941a | 3253 | /* And kick in case we missed a new request submission. */ |
22916bad | 3254 | if (__tasklet_enable(&engine->sched_engine->tasklet)) |
16f2941a CW |
3255 | __execlists_kick(execlists); |
3256 | ||
639f2f24 | 3257 | ENGINE_TRACE(engine, "depth->%d\n", |
22916bad | 3258 | atomic_read(&engine->sched_engine->tasklet.count)); |
5adfb772 CW |
3259 | } |
3260 | ||
31bb59cc | 3261 | static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine) |
73d477f6 | 3262 | { |
baba6e57 DCS |
3263 | ENGINE_WRITE(engine, RING_IMR, |
3264 | ~(engine->irq_enable_mask | engine->irq_keep_mask)); | |
3265 | ENGINE_POSTING_READ(engine, RING_IMR); | |
73d477f6 OM |
3266 | } |
3267 | ||
31bb59cc | 3268 | static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine) |
73d477f6 | 3269 | { |
baba6e57 | 3270 | ENGINE_WRITE(engine, RING_IMR, ~engine->irq_keep_mask); |
73d477f6 OM |
3271 | } |
3272 | ||
c34c5bca CW |
3273 | static void execlists_park(struct intel_engine_cs *engine) |
3274 | { | |
2229adc8 | 3275 | cancel_timer(&engine->execlists.timer); |
3a7a92ab | 3276 | cancel_timer(&engine->execlists.preempt); |
c34c5bca CW |
3277 | } |
3278 | ||
d1cee2d3 MB |
3279 | static void add_to_engine(struct i915_request *rq) |
3280 | { | |
3281 | lockdep_assert_held(&rq->engine->sched_engine->lock); | |
3282 | list_move_tail(&rq->sched.link, &rq->engine->sched_engine->requests); | |
3283 | } | |
3284 | ||
3285 | static void remove_from_engine(struct i915_request *rq) | |
3286 | { | |
3287 | struct intel_engine_cs *engine, *locked; | |
3288 | ||
3289 | /* | |
3290 | * Virtual engines complicate acquiring the engine timeline lock, | |
3291 | * as their rq->engine pointer is not stable until under that | |
3292 | * engine lock. The simple ploy we use is to take the lock then | |
3293 | * check that the rq still belongs to the newly locked engine. | |
3294 | */ | |
3295 | locked = READ_ONCE(rq->engine); | |
3296 | spin_lock_irq(&locked->sched_engine->lock); | |
3297 | while (unlikely(locked != (engine = READ_ONCE(rq->engine)))) { | |
3298 | spin_unlock(&locked->sched_engine->lock); | |
3299 | spin_lock(&engine->sched_engine->lock); | |
3300 | locked = engine; | |
3301 | } | |
3302 | list_del_init(&rq->sched.link); | |
3303 | ||
3304 | clear_bit(I915_FENCE_FLAG_PQUEUE, &rq->fence.flags); | |
3305 | clear_bit(I915_FENCE_FLAG_HOLD, &rq->fence.flags); | |
3306 | ||
3307 | /* Prevent further __await_execution() registering a cb, then flush */ | |
3308 | set_bit(I915_FENCE_FLAG_ACTIVE, &rq->fence.flags); | |
3309 | ||
3310 | spin_unlock_irq(&locked->sched_engine->lock); | |
3311 | ||
3312 | i915_request_notify_execute_cb_imm(rq); | |
3313 | } | |
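/*
 * Worked scenario (illustrative only): suppose rq belongs to a virtual
 * engine and rq->engine reads as vcs0. While we wait for vcs0's
 * sched_engine lock, the virtual tasklet migrates the request to vcs1;
 * the re-read inside the loop then observes vcs1, so we drop vcs0's lock,
 * take vcs1's, and only delete the request once rq->engine is stable
 * under the lock we actually hold.
 */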
3314 | ||
751f82b3 CW |
3315 | static bool can_preempt(struct intel_engine_cs *engine) |
3316 | { | |
c816723b | 3317 | if (GRAPHICS_VER(engine->i915) > 8) |
751f82b3 CW |
3318 | return true; |
3319 | ||
3320 | /* GPGPU on bdw requires extra w/a; not implemented */ | |
3321 | return engine->class != RENDER_CLASS; | |
3322 | } | |
3323 | ||
71ed6011 MB |
3324 | static void kick_execlists(const struct i915_request *rq, int prio) |
3325 | { | |
3326 | struct intel_engine_cs *engine = rq->engine; | |
3327 | struct i915_sched_engine *sched_engine = engine->sched_engine; | |
3328 | const struct i915_request *inflight; | |
3329 | ||
3330 | /* | |
3331 | * We only need to kick the tasklet once for the high priority | |
3332 | * new context we add into the queue. | |
3333 | */ | |
3334 | if (prio <= sched_engine->queue_priority_hint) | |
3335 | return; | |
3336 | ||
3337 | rcu_read_lock(); | |
3338 | ||
3339 | /* Nothing currently active? We're overdue for a submission! */ | |
3340 | inflight = execlists_active(&engine->execlists); | |
3341 | if (!inflight) | |
3342 | goto unlock; | |
3343 | ||
3344 | /* | |
3345 | * If we are already the currently executing context, don't | |
3346 | * bother evaluating if we should preempt ourselves. | |
3347 | */ | |
3348 | if (inflight->context == rq->context) | |
3349 | goto unlock; | |
3350 | ||
3351 | ENGINE_TRACE(engine, | |
3352 | "bumping queue-priority-hint:%d for rq:%llx:%lld, inflight:%llx:%lld prio %d\n", | |
3353 | prio, | |
3354 | rq->fence.context, rq->fence.seqno, | |
3355 | inflight->fence.context, inflight->fence.seqno, | |
3356 | inflight->sched.attr.priority); | |
3357 | ||
3358 | sched_engine->queue_priority_hint = prio; | |
3359 | ||
3360 | /* | |
3361 | * Allow preemption of low -> normal -> high, but we do | |
3362 | * not allow low priority tasks to preempt other low priority | |
3363 | * tasks under the impression that latency for low priority | |
3364 | * tasks does not matter (as much as background throughput), | |
3365 | * so kiss. | |
3366 | */ | |
3367 | if (prio >= max(I915_PRIORITY_NORMAL, rq_prio(inflight))) | |
22916bad | 3368 | tasklet_hi_schedule(&sched_engine->tasklet); |
71ed6011 MB |
3369 | |
3370 | unlock: | |
3371 | rcu_read_unlock(); | |
3372 | } | |
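/*
 * Worked example (illustrative only): assume queue_priority_hint is INT_MIN
 * and the inflight context runs at I915_PRIORITY_NORMAL. A below-normal
 * request from another context only records itself as the new hint; a
 * request at or above max(I915_PRIORITY_NORMAL, rq_prio(inflight)) also
 * schedules the tasklet so the dequeue path can evaluate preemption.
 */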
3373 | ||
007c4578 | 3374 | static void execlists_set_default_submission(struct intel_engine_cs *engine) |
ddd66c51 | 3375 | { |
ff44ad51 | 3376 | engine->submit_request = execlists_submit_request; |
3f623e06 | 3377 | engine->sched_engine->schedule = i915_schedule; |
71ed6011 | 3378 | engine->sched_engine->kick_backend = kick_execlists; |
22916bad | 3379 | engine->sched_engine->tasklet.callback = execlists_submission_tasklet; |
ddd66c51 CW |
3380 | } |
3381 | ||
97c16353 CW |
3382 | static void execlists_shutdown(struct intel_engine_cs *engine) |
3383 | { | |
3384 | /* Synchronise with residual timers and any softirq they raise */ | |
3385 | del_timer_sync(&engine->execlists.timer); | |
3386 | del_timer_sync(&engine->execlists.preempt); | |
22916bad | 3387 | tasklet_kill(&engine->sched_engine->tasklet); |
97c16353 CW |
3388 | } |
3389 | ||
e26b6d43 | 3390 | static void execlists_release(struct intel_engine_cs *engine) |
45b9c968 | 3391 | { |
bd3ec9e7 CW |
3392 | engine->sanitize = NULL; /* no longer in control, nothing to sanitize */ |
3393 | ||
97c16353 CW |
3394 | execlists_shutdown(engine); |
3395 | ||
45b9c968 | 3396 | intel_engine_cleanup_common(engine); |
a0d3fdb6 | 3397 | lrc_fini_wa_ctx(engine); |
45b9c968 CW |
3398 | } |
3399 | ||
77cdd054 UNR |
3400 | static ktime_t __execlists_engine_busyness(struct intel_engine_cs *engine, |
3401 | ktime_t *now) | |
3402 | { | |
3403 | struct intel_engine_execlists_stats *stats = &engine->stats.execlists; | |
3404 | ktime_t total = stats->total; | |
3405 | ||
3406 | /* | |
3407 | * If the engine is executing something at the moment | |
3408 | * add it to the total. | |
3409 | */ | |
3410 | *now = ktime_get(); | |
3411 | if (READ_ONCE(stats->active)) | |
3412 | total = ktime_add(total, ktime_sub(*now, stats->start)); | |
3413 | ||
3414 | return total; | |
3415 | } | |
3416 | ||
3417 | static ktime_t execlists_engine_busyness(struct intel_engine_cs *engine, | |
3418 | ktime_t *now) | |
3419 | { | |
3420 | struct intel_engine_execlists_stats *stats = &engine->stats.execlists; | |
3421 | unsigned int seq; | |
3422 | ktime_t total; | |
3423 | ||
3424 | do { | |
3425 | seq = read_seqcount_begin(&stats->lock); | |
3426 | total = __execlists_engine_busyness(engine, now); | |
3427 | } while (read_seqcount_retry(&stats->lock, seq)); | |
3428 | ||
3429 | return total; | |
3430 | } | |
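/*
 * Minimal sketch (an assumption, the writer is not shown here) of the side
 * this read-retry loop pairs with: updates to stats->active, stats->start
 * and stats->total happen under
 *
 *	write_seqcount_begin(&stats->lock);
 *	... update the busyness fields ...
 *	write_seqcount_end(&stats->lock);
 *
 * so a reader that raced with an update simply retries and returns a
 * consistent total.
 */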
3431 | ||
c9cacf93 | 3432 | static void |
e1382efb | 3433 | logical_ring_default_vfuncs(struct intel_engine_cs *engine) |
c9cacf93 | 3434 | { |
1ca9b8da | 3435 | /* Default vfuncs which can be overridden by each engine. */ |
45b9c968 | 3436 | |
79ffac85 | 3437 | engine->resume = execlists_resume; |
5adfb772 | 3438 | |
4dc84b77 | 3439 | engine->cops = &execlists_context_ops; |
f73e7399 | 3440 | engine->request_alloc = execlists_request_alloc; |
d1cee2d3 MB |
3441 | engine->add_active_request = add_to_engine; |
3442 | engine->remove_active_request = remove_from_engine; | |
f73e7399 | 3443 | |
0db3633f CW |
3444 | engine->reset.prepare = execlists_reset_prepare; |
3445 | engine->reset.rewind = execlists_reset_rewind; | |
3446 | engine->reset.cancel = execlists_reset_cancel; | |
3447 | engine->reset.finish = execlists_reset_finish; | |
3448 | ||
3449 | engine->park = execlists_park; | |
3450 | engine->unpark = NULL; | |
3451 | ||
d0d829e5 | 3452 | engine->emit_flush = gen8_emit_flush_xcs; |
85474441 | 3453 | engine->emit_init_breadcrumb = gen8_emit_init_breadcrumb; |
d0d829e5 | 3454 | engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_xcs; |
c816723b | 3455 | if (GRAPHICS_VER(engine->i915) >= 12) { |
d0d829e5 DCS |
3456 | engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_xcs; |
3457 | engine->emit_flush = gen12_emit_flush_xcs; | |
972282c4 | 3458 | } |
007c4578 | 3459 | engine->set_default_submission = execlists_set_default_submission; |
ddd66c51 | 3460 | |
c816723b | 3461 | if (GRAPHICS_VER(engine->i915) < 11) { |
d4ccceb0 TU |
3462 | engine->irq_enable = gen8_logical_ring_enable_irq; |
3463 | engine->irq_disable = gen8_logical_ring_disable_irq; | |
3464 | } else { | |
3465 | /* | |
3466 | * TODO: On Gen11 interrupt masks need to be clear | |
3467 | * to allow C6 entry. Keep interrupts enabled at | |
3468 | * and take the hit of generating extra interrupts | |
3469 | * until a more refined solution exists. | |
3470 | */ | |
3471 | } | |
0669a6e1 | 3472 | intel_engine_set_irq_handler(engine, execlists_irq_handler); |
0db3633f CW |
3473 | |
3474 | engine->flags |= I915_ENGINE_SUPPORTS_STATS; | |
3475 | if (!intel_vgpu_active(engine->i915)) { | |
3476 | engine->flags |= I915_ENGINE_HAS_SEMAPHORES; | |
3477 | if (can_preempt(engine)) { | |
3478 | engine->flags |= I915_ENGINE_HAS_PREEMPTION; | |
1a839e01 | 3479 | if (CONFIG_DRM_I915_TIMESLICE_DURATION) |
0db3633f CW |
3480 | engine->flags |= I915_ENGINE_HAS_TIMESLICES; |
3481 | } | |
3482 | } | |
3483 | ||
166c44e6 CW |
3484 | if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 50)) { |
3485 | if (intel_engine_has_preemption(engine)) | |
d263545e | 3486 | engine->emit_bb_start = xehp_emit_bb_start; |
166c44e6 | 3487 | else |
d263545e | 3488 | engine->emit_bb_start = xehp_emit_bb_start_noarb; |
166c44e6 CW |
3489 | } else { |
3490 | if (intel_engine_has_preemption(engine)) | |
3491 | engine->emit_bb_start = gen8_emit_bb_start; | |
3492 | else | |
3493 | engine->emit_bb_start = gen8_emit_bb_start_noarb; | |
3494 | } | |
77cdd054 UNR |
3495 | |
3496 | engine->busyness = execlists_engine_busyness; | |
c9cacf93 TU |
3497 | } |
3498 | ||
e7326336 | 3499 | static void logical_ring_default_irqs(struct intel_engine_cs *engine) |
d9f3af96 | 3500 | { |
fa6f071d DCS |
3501 | unsigned int shift = 0; |
3502 | ||
c816723b | 3503 | if (GRAPHICS_VER(engine->i915) < 11) { |
fa6f071d | 3504 | const u8 irq_shifts[] = { |
8a68d464 CW |
3505 | [RCS0] = GEN8_RCS_IRQ_SHIFT, |
3506 | [BCS0] = GEN8_BCS_IRQ_SHIFT, | |
3507 | [VCS0] = GEN8_VCS0_IRQ_SHIFT, | |
3508 | [VCS1] = GEN8_VCS1_IRQ_SHIFT, | |
3509 | [VECS0] = GEN8_VECS_IRQ_SHIFT, | |
fa6f071d DCS |
3510 | }; |
3511 | ||
3512 | shift = irq_shifts[engine->id]; | |
3513 | } | |
3514 | ||
0bc40be8 TU |
3515 | engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift; |
3516 | engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift; | |
70a76a9b | 3517 | engine->irq_keep_mask |= GT_CS_MASTER_ERROR_INTERRUPT << shift; |
c4e8ba73 | 3518 | engine->irq_keep_mask |= GT_WAIT_SEMAPHORE_INTERRUPT << shift; |
d9f3af96 TU |
3519 | } |
3520 | ||
845f7f7e MK |
3521 | static void rcs_submission_override(struct intel_engine_cs *engine) |
3522 | { | |
c816723b | 3523 | switch (GRAPHICS_VER(engine->i915)) { |
845f7f7e | 3524 | case 12: |
d0d829e5 | 3525 | engine->emit_flush = gen12_emit_flush_rcs; |
c210e85b CW |
3526 | engine->emit_fini_breadcrumb = gen12_emit_fini_breadcrumb_rcs; |
3527 | break; | |
845f7f7e | 3528 | case 11: |
d0d829e5 | 3529 | engine->emit_flush = gen11_emit_flush_rcs; |
845f7f7e MK |
3530 | engine->emit_fini_breadcrumb = gen11_emit_fini_breadcrumb_rcs; |
3531 | break; | |
3532 | default: | |
d0d829e5 | 3533 | engine->emit_flush = gen8_emit_flush_rcs; |
845f7f7e MK |
3534 | engine->emit_fini_breadcrumb = gen8_emit_fini_breadcrumb_rcs; |
3535 | break; | |
3536 | } | |
3537 | } | |
3538 | ||
11334c6a | 3539 | int intel_execlists_submission_setup(struct intel_engine_cs *engine) |
bb45438f | 3540 | { |
7d70a123 CW |
3541 | struct intel_engine_execlists * const execlists = &engine->execlists; |
3542 | struct drm_i915_private *i915 = engine->i915; | |
3543 | struct intel_uncore *uncore = engine->uncore; | |
3544 | u32 base = engine->mmio_base; | |
3545 | ||
22916bad | 3546 | tasklet_setup(&engine->sched_engine->tasklet, execlists_submission_tasklet); |
3a7a92ab CW |
3547 | timer_setup(&engine->execlists.timer, execlists_timeslice, 0); |
3548 | timer_setup(&engine->execlists.preempt, execlists_preempt, 0); | |
bb45438f | 3549 | |
bb45438f TU |
3550 | logical_ring_default_vfuncs(engine); |
3551 | logical_ring_default_irqs(engine); | |
52954edd | 3552 | |
c524cd40 UNR |
3553 | seqcount_init(&engine->stats.execlists.lock); |
3554 | ||
c674c5b9 | 3555 | if (engine->flags & I915_ENGINE_HAS_RCS_REG_STATE) |
845f7f7e | 3556 | rcs_submission_override(engine); |
11334c6a | 3557 | |
9a437ccb | 3558 | lrc_init_wa_ctx(engine); |
a60acb22 | 3559 | |
bc4237ec | 3560 | if (HAS_LOGICAL_RING_ELSQ(i915)) { |
72e9abc3 | 3561 | execlists->submit_reg = intel_uncore_regs(uncore) + |
baba6e57 | 3562 | i915_mmio_reg_offset(RING_EXECLIST_SQ_CONTENTS(base)); |
72e9abc3 | 3563 | execlists->ctrl_reg = intel_uncore_regs(uncore) + |
baba6e57 | 3564 | i915_mmio_reg_offset(RING_EXECLIST_CONTROL(base)); |
bfac1e2b MR |
3565 | |
3566 | engine->fw_domain = intel_uncore_forcewake_for_reg(engine->uncore, | |
3567 | RING_EXECLIST_CONTROL(engine->mmio_base), | |
3568 | FW_REG_WRITE); | |
05f0addd | 3569 | } else { |
72e9abc3 | 3570 | execlists->submit_reg = intel_uncore_regs(uncore) + |
baba6e57 | 3571 | i915_mmio_reg_offset(RING_ELSP(base)); |
05f0addd | 3572 | } |
693cfbf0 | 3573 | |
46592892 | 3574 | execlists->csb_status = |
f24a44e5 | 3575 | (u64 *)&engine->status_page.addr[I915_HWS_CSB_BUF0_INDEX]; |
bc4237ec | 3576 | |
46592892 | 3577 | execlists->csb_write = |
df62ae6f | 3578 | &engine->status_page.addr[INTEL_HWS_CSB_WRITE_INDEX(i915)]; |
bc4237ec | 3579 | |
c816723b | 3580 | if (GRAPHICS_VER(i915) < 11) |
632c7ad6 MK |
3581 | execlists->csb_size = GEN8_CSB_ENTRIES; |
3582 | else | |
3583 | execlists->csb_size = GEN11_CSB_ENTRIES; | |
7d4c75d9 | 3584 | |
6f0726b4 | 3585 | engine->context_tag = GENMASK(BITS_PER_LONG - 2, 0); |
50a9ea08 SS |
3586 | if (GRAPHICS_VER(engine->i915) >= 11 && |
3587 | GRAPHICS_VER_FULL(engine->i915) < IP_VER(12, 50)) { | |
2632f174 CW |
3588 | execlists->ccid |= engine->instance << (GEN11_ENGINE_INSTANCE_SHIFT - 32); |
3589 | execlists->ccid |= engine->class << (GEN11_ENGINE_CLASS_SHIFT - 32); | |
3590 | } | |
3591 | ||
7807a76b | 3592 | /* Finally, take ownership and responsibility for cleanup! */ |
bd3ec9e7 | 3593 | engine->sanitize = execlists_sanitize; |
7807a76b CW |
3594 | engine->release = execlists_release; |
3595 | ||
a19d6ff2 | 3596 | return 0; |
a19d6ff2 TU |
3597 | } |
3598 | ||
422d7df4 CW |
3599 | static struct list_head *virtual_queue(struct virtual_engine *ve) |
3600 | { | |
3e28d371 | 3601 | return &ve->base.sched_engine->default_priolist.requests; |
422d7df4 CW |
3602 | } |
3603 | ||
46eecfcc | 3604 | static void rcu_virtual_context_destroy(struct work_struct *wrk) |
6d06779e CW |
3605 | { |
3606 | struct virtual_engine *ve = | |
46eecfcc | 3607 | container_of(wrk, typeof(*ve), rcu.work); |
6d06779e CW |
3608 | unsigned int n; |
3609 | ||
754f7a0b | 3610 | GEM_BUG_ON(ve->context.inflight); |
6d06779e | 3611 | |
46eecfcc CW |
3612 | /* Preempt-to-busy may leave a stale request behind. */ |
3613 | if (unlikely(ve->request)) { | |
3614 | struct i915_request *old; | |
3615 | ||
349a2bc5 | 3616 | spin_lock_irq(&ve->base.sched_engine->lock); |
46eecfcc CW |
3617 | |
3618 | old = fetch_and_zero(&ve->request); | |
3619 | if (old) { | |
163433e5 | 3620 | GEM_BUG_ON(!__i915_request_is_complete(old)); |
46eecfcc CW |
3621 | __i915_request_submit(old); |
3622 | i915_request_put(old); | |
3623 | } | |
3624 | ||
349a2bc5 | 3625 | spin_unlock_irq(&ve->base.sched_engine->lock); |
46eecfcc CW |
3626 | } |
3627 | ||
3628 | /* | |
3629 | * Flush the tasklet in case it is still running on another core. | |
3630 | * | |
3631 | * This needs to be done before we remove ourselves from the siblings' | |
3632 | * rbtrees as in the case it is running in parallel, it may reinsert | |
3633 | * the rb_node into a sibling. | |
3634 | */ | |
22916bad | 3635 | tasklet_kill(&ve->base.sched_engine->tasklet); |
46eecfcc CW |
3636 | |
3637 | /* Decouple ourselves from the siblings, no more access allowed. */ | |
6d06779e CW |
3638 | for (n = 0; n < ve->num_siblings; n++) { |
3639 | struct intel_engine_cs *sibling = ve->siblings[n]; | |
3640 | struct rb_node *node = &ve->nodes[sibling->id].rb; | |
3641 | ||
3642 | if (RB_EMPTY_NODE(node)) | |
3643 | continue; | |
3644 | ||
349a2bc5 | 3645 | spin_lock_irq(&sibling->sched_engine->lock); |
6d06779e | 3646 | |
22916bad | 3647 | /* Detachment is lazily performed in the sched_engine->tasklet */ |
6d06779e CW |
3648 | if (!RB_EMPTY_NODE(node)) |
3649 | rb_erase_cached(node, &sibling->execlists.virtual); | |
3650 | ||
349a2bc5 | 3651 | spin_unlock_irq(&sibling->sched_engine->lock); |
6d06779e | 3652 | } |
22916bad | 3653 | GEM_BUG_ON(__tasklet_is_scheduled(&ve->base.sched_engine->tasklet)); |
46eecfcc | 3654 | GEM_BUG_ON(!list_empty(virtual_queue(ve))); |
6d06779e | 3655 | |
a0d3fdb6 | 3656 | lrc_fini(&ve->context); |
df8cf31e | 3657 | intel_context_fini(&ve->context); |
6d06779e | 3658 | |
3e28d371 | 3659 | if (ve->base.breadcrumbs) |
a95d1160 | 3660 | intel_breadcrumbs_put(ve->base.breadcrumbs); |
3e28d371 MB |
3661 | if (ve->base.sched_engine) |
3662 | i915_sched_engine_put(ve->base.sched_engine); | |
848862e6 CW |
3663 | intel_engine_free_request_pool(&ve->base); |
3664 | ||
6d06779e CW |
3665 | kfree(ve); |
3666 | } | |
3667 | ||
46eecfcc CW |
3668 | static void virtual_context_destroy(struct kref *kref) |
3669 | { | |
3670 | struct virtual_engine *ve = | |
3671 | container_of(kref, typeof(*ve), context.ref); | |
3672 | ||
3673 | GEM_BUG_ON(!list_empty(&ve->context.signals)); | |
3674 | ||
3675 | /* | |
3676 | * When destroying the virtual engine, we have to be aware that | |
3677 | * it may still be in use from a hardirq/softirq context causing | |
3678 | * the resubmission of a completed request (background completion | |
3679 | * due to preempt-to-busy). Before we can free the engine, we need | |
3680 | * to flush the submission code and tasklets that are still potentially | |
3681 | * accessing the engine. Flushing the tasklets requires process context, | |
3682 | * and since we can guard the resubmit onto the engine with an RCU read | |
3683 | * lock, we can delegate the free of the engine to an RCU worker. | |
3684 | */ | |
3685 | INIT_RCU_WORK(&ve->rcu, rcu_virtual_context_destroy); | |
848a4e5c | 3686 | queue_rcu_work(ve->context.engine->i915->unordered_wq, &ve->rcu); |
46eecfcc CW |
3687 | } |
3688 | ||
6d06779e CW |
3689 | static void virtual_engine_initial_hint(struct virtual_engine *ve) |
3690 | { | |
3691 | int swp; | |
3692 | ||
3693 | /* | |
3694 | * Pick a random sibling on starting to help spread the load around. | |
3695 | * | |
3696 | * New contexts are typically created with exactly the same order | |
3697 | * of siblings, and often started in batches. Due to the way we iterate | |
3698 | * the array of siblings when submitting requests, sibling[0] is | |
3699 | * prioritised for dequeuing. If we make sure that sibling[0] is fairly | |
3700 | * randomised across the system, we also help spread the load by the | |
3701 | * first engine we inspect being different each time. | |
3702 | * | |
3703 | * NB This does not force us to execute on this engine, it will just | |
3704 | * typically be the first we inspect for submission. | |
3705 | */ | |
8032bf12 | 3706 | swp = get_random_u32_below(ve->num_siblings); |
110f9efa CW |
3707 | if (swp) |
3708 | swap(ve->siblings[swp], ve->siblings[0]); | |
6d06779e CW |
3709 | } |
3710 | ||
d5e19353 CW |
3711 | static int virtual_context_alloc(struct intel_context *ce) |
3712 | { | |
3713 | struct virtual_engine *ve = container_of(ce, typeof(*ve), context); | |
3714 | ||
093a0bea | 3715 | return lrc_alloc(ce, ve->siblings[0]); |
d5e19353 CW |
3716 | } |
3717 | ||
a0d3fdb6 | 3718 | static int virtual_context_pre_pin(struct intel_context *ce, |
7898843c CW |
3719 | struct i915_gem_ww_ctx *ww, |
3720 | void **vaddr) | |
6d06779e CW |
3721 | { |
3722 | struct virtual_engine *ve = container_of(ce, typeof(*ve), context); | |
6d06779e | 3723 | |
9c01524d ML |
3724 | /* Note: we must use a real engine class for setting up reg state */ |
3725 | return __execlists_context_pre_pin(ce, ve->siblings[0], ww, vaddr); | |
a0d3fdb6 CW |
3726 | } |
3727 | ||
3728 | static int virtual_context_pin(struct intel_context *ce, void *vaddr) | |
3729 | { | |
3730 | struct virtual_engine *ve = container_of(ce, typeof(*ve), context); | |
3731 | ||
3732 | return lrc_pin(ce, ve->siblings[0], vaddr); | |
6d06779e CW |
3733 | } |
3734 | ||
3735 | static void virtual_context_enter(struct intel_context *ce) | |
3736 | { | |
3737 | struct virtual_engine *ve = container_of(ce, typeof(*ve), context); | |
3738 | unsigned int n; | |
3739 | ||
3740 | for (n = 0; n < ve->num_siblings; n++) | |
3741 | intel_engine_pm_get(ve->siblings[n]); | |
531958f6 CW |
3742 | |
3743 | intel_timeline_enter(ce->timeline); | |
6d06779e CW |
3744 | } |
3745 | ||
3746 | static void virtual_context_exit(struct intel_context *ce) | |
3747 | { | |
3748 | struct virtual_engine *ve = container_of(ce, typeof(*ve), context); | |
3749 | unsigned int n; | |
3750 | ||
531958f6 CW |
3751 | intel_timeline_exit(ce->timeline); |
3752 | ||
6d06779e CW |
3753 | for (n = 0; n < ve->num_siblings; n++) |
3754 | intel_engine_pm_put(ve->siblings[n]); | |
3755 | } | |
3756 | ||
55612025 MB |
3757 | static struct intel_engine_cs * |
3758 | virtual_get_sibling(struct intel_engine_cs *engine, unsigned int sibling) | |
3759 | { | |
3760 | struct virtual_engine *ve = to_virtual_engine(engine); | |
3761 | ||
3762 | if (sibling >= ve->num_siblings) | |
3763 | return NULL; | |
3764 | ||
3765 | return ve->siblings[sibling]; | |
3766 | } | |
3767 | ||
6d06779e | 3768 | static const struct intel_context_ops virtual_context_ops = { |
bb6287cb | 3769 | .flags = COPS_HAS_INFLIGHT | COPS_RUNTIME_CYCLES, |
cc1557ca | 3770 | |
d5e19353 CW |
3771 | .alloc = virtual_context_alloc, |
3772 | ||
62eaf0ae MB |
3773 | .cancel_request = execlists_context_cancel_request, |
3774 | ||
a0d3fdb6 | 3775 | .pre_pin = virtual_context_pre_pin, |
6d06779e | 3776 | .pin = virtual_context_pin, |
a0d3fdb6 CW |
3777 | .unpin = lrc_unpin, |
3778 | .post_unpin = lrc_post_unpin, | |
6d06779e CW |
3779 | |
3780 | .enter = virtual_context_enter, | |
3781 | .exit = virtual_context_exit, | |
3782 | ||
3783 | .destroy = virtual_context_destroy, | |
55612025 MB |
3784 | |
3785 | .get_sibling = virtual_get_sibling, | |
6d06779e CW |
3786 | }; |
3787 | ||
78e41ddd CW |
3788 | static intel_engine_mask_t virtual_submission_mask(struct virtual_engine *ve) |
3789 | { | |
3790 | struct i915_request *rq; | |
3791 | intel_engine_mask_t mask; | |
3792 | ||
3793 | rq = READ_ONCE(ve->request); | |
3794 | if (!rq) | |
3795 | return 0; | |
3796 | ||
3797 | /* The rq is ready for submission; rq->execution_mask is now stable. */ | |
3798 | mask = rq->execution_mask; | |
3799 | if (unlikely(!mask)) { | |
3800 | /* Invalid selection, submit to a random engine in error */ | |
36e191f0 | 3801 | i915_request_set_error_once(rq, -ENODEV); |
78e41ddd CW |
3802 | mask = ve->siblings[0]->mask; |
3803 | } | |
3804 | ||
639f2f24 VSD |
3805 | ENGINE_TRACE(&ve->base, "rq=%llx:%lld, mask=%x, prio=%d\n", |
3806 | rq->fence.context, rq->fence.seqno, | |
3e28d371 | 3807 | mask, ve->base.sched_engine->queue_priority_hint); |
78e41ddd CW |
3808 | |
3809 | return mask; | |
3810 | } | |
3811 | ||
2913fa4d | 3812 | static void virtual_submission_tasklet(struct tasklet_struct *t) |
6d06779e | 3813 | { |
22916bad MB |
3814 | struct i915_sched_engine *sched_engine = |
3815 | from_tasklet(sched_engine, t, tasklet); | |
2913fa4d | 3816 | struct virtual_engine * const ve = |
22916bad MB |
3817 | (struct virtual_engine *)sched_engine->private_data; |
3818 | const int prio = READ_ONCE(sched_engine->queue_priority_hint); | |
78e41ddd | 3819 | intel_engine_mask_t mask; |
6d06779e CW |
3820 | unsigned int n; |
3821 | ||
78e41ddd CW |
3822 | rcu_read_lock(); |
3823 | mask = virtual_submission_mask(ve); | |
3824 | rcu_read_unlock(); | |
3825 | if (unlikely(!mask)) | |
3826 | return; | |
3827 | ||
36fe164d CW |
3828 | for (n = 0; n < ve->num_siblings; n++) { |
3829 | struct intel_engine_cs *sibling = READ_ONCE(ve->siblings[n]); | |
6d06779e CW |
3830 | struct ve_node * const node = &ve->nodes[sibling->id]; |
3831 | struct rb_node **parent, *rb; | |
3832 | bool first; | |
3833 | ||
36fe164d CW |
3834 | if (!READ_ONCE(ve->request)) |
3835 | break; /* already handled by a sibling's tasklet */ | |
3836 | ||
349a2bc5 | 3837 | spin_lock_irq(&sibling->sched_engine->lock); |
2efa2c52 | 3838 | |
78e41ddd CW |
3839 | if (unlikely(!(mask & sibling->mask))) { |
3840 | if (!RB_EMPTY_NODE(&node->rb)) { | |
78e41ddd CW |
3841 | rb_erase_cached(&node->rb, |
3842 | &sibling->execlists.virtual); | |
3843 | RB_CLEAR_NODE(&node->rb); | |
78e41ddd | 3844 | } |
78e41ddd | 3845 | |
2efa2c52 CW |
3846 | goto unlock_engine; |
3847 | } | |
6d06779e | 3848 | |
2efa2c52 | 3849 | if (unlikely(!RB_EMPTY_NODE(&node->rb))) { |
6d06779e CW |
3850 | /* |
3851 | * Cheat and avoid rebalancing the tree if we can | |
3852 | * reuse this node in situ. | |
3853 | */ | |
3854 | first = rb_first_cached(&sibling->execlists.virtual) == | |
3855 | &node->rb; | |
3856 | if (prio == node->prio || (prio > node->prio && first)) | |
3857 | goto submit_engine; | |
3858 | ||
3859 | rb_erase_cached(&node->rb, &sibling->execlists.virtual); | |
3860 | } | |
3861 | ||
3862 | rb = NULL; | |
3863 | first = true; | |
3864 | parent = &sibling->execlists.virtual.rb_root.rb_node; | |
3865 | while (*parent) { | |
3866 | struct ve_node *other; | |
3867 | ||
3868 | rb = *parent; | |
3869 | other = rb_entry(rb, typeof(*other), rb); | |
3870 | if (prio > other->prio) { | |
3871 | parent = &rb->rb_left; | |
3872 | } else { | |
3873 | parent = &rb->rb_right; | |
3874 | first = false; | |
3875 | } | |
3876 | } | |
3877 | ||
3878 | rb_link_node(&node->rb, rb, parent); | |
3879 | rb_insert_color_cached(&node->rb, | |
3880 | &sibling->execlists.virtual, | |
3881 | first); | |
3882 | ||
3883 | submit_engine: | |
3884 | GEM_BUG_ON(RB_EMPTY_NODE(&node->rb)); | |
3885 | node->prio = prio; | |
3e28d371 | 3886 | if (first && prio > sibling->sched_engine->queue_priority_hint) |
22916bad | 3887 | tasklet_hi_schedule(&sibling->sched_engine->tasklet); |
6d06779e | 3888 | |
2efa2c52 | 3889 | unlock_engine: |
349a2bc5 | 3890 | spin_unlock_irq(&sibling->sched_engine->lock); |
2efa2c52 CW |
3891 | |
3892 | if (intel_context_inflight(&ve->context)) | |
3893 | break; | |
6d06779e | 3894 | } |
6d06779e CW |
3895 | } |
3896 | ||
3897 | static void virtual_submit_request(struct i915_request *rq) | |
3898 | { | |
3899 | struct virtual_engine *ve = to_virtual_engine(rq->engine); | |
b647c7df | 3900 | unsigned long flags; |
6d06779e | 3901 | |
639f2f24 VSD |
3902 | ENGINE_TRACE(&ve->base, "rq=%llx:%lld\n", |
3903 | rq->fence.context, | |
3904 | rq->fence.seqno); | |
6d06779e CW |
3905 | |
3906 | GEM_BUG_ON(ve->base.submit_request != virtual_submit_request); | |
3907 | ||
349a2bc5 | 3908 | spin_lock_irqsave(&ve->base.sched_engine->lock, flags); |
b647c7df | 3909 | |
f81475bb | 3910 | /* By the time we resubmit a request, it may be completed */ |
6f0726b4 | 3911 | if (__i915_request_is_complete(rq)) { |
b647c7df | 3912 | __i915_request_submit(rq); |
f81475bb CW |
3913 | goto unlock; |
3914 | } | |
6d06779e | 3915 | |
f81475bb | 3916 | if (ve->request) { /* background completion from preempt-to-busy */ |
163433e5 | 3917 | GEM_BUG_ON(!__i915_request_is_complete(ve->request)); |
f81475bb CW |
3918 | __i915_request_submit(ve->request); |
3919 | i915_request_put(ve->request); | |
3920 | } | |
b647c7df | 3921 | |
3e28d371 | 3922 | ve->base.sched_engine->queue_priority_hint = rq_prio(rq); |
f81475bb | 3923 | ve->request = i915_request_get(rq); |
b647c7df | 3924 | |
f81475bb CW |
3925 | GEM_BUG_ON(!list_empty(virtual_queue(ve))); |
3926 | list_move_tail(&rq->sched.link, virtual_queue(ve)); | |
3927 | ||
22916bad | 3928 | tasklet_hi_schedule(&ve->base.sched_engine->tasklet); |
422d7df4 | 3929 | |
f81475bb | 3930 | unlock: |
349a2bc5 | 3931 | spin_unlock_irqrestore(&ve->base.sched_engine->lock, flags); |
6d06779e CW |
3932 | } |
3933 | ||
55612025 | 3934 | static struct intel_context * |
e5e32171 MB |
3935 | execlists_create_virtual(struct intel_engine_cs **siblings, unsigned int count, |
3936 | unsigned long flags) | |
6d06779e | 3937 | { |
a10234fd | 3938 | struct drm_i915_private *i915 = siblings[0]->i915; |
6d06779e CW |
3939 | struct virtual_engine *ve; |
3940 | unsigned int n; | |
3941 | int err; | |
3942 | ||
6d06779e CW |
3943 | ve = kzalloc(struct_size(ve, siblings, count), GFP_KERNEL); |
3944 | if (!ve) | |
3945 | return ERR_PTR(-ENOMEM); | |
3946 | ||
a10234fd | 3947 | ve->base.i915 = i915; |
f937f561 | 3948 | ve->base.gt = siblings[0]->gt; |
20af04f3 | 3949 | ve->base.uncore = siblings[0]->uncore; |
6d06779e | 3950 | ve->base.id = -1; |
f75fc37b | 3951 | |
6d06779e CW |
3952 | ve->base.class = OTHER_CLASS; |
3953 | ve->base.uabi_class = I915_ENGINE_CLASS_INVALID; | |
3954 | ve->base.instance = I915_ENGINE_CLASS_INVALID_VIRTUAL; | |
f75fc37b | 3955 | ve->base.uabi_instance = I915_ENGINE_CLASS_INVALID_VIRTUAL; |
6d06779e | 3956 | |
44d89409 CW |
3957 | /* |
3958 | * The decision on whether to submit a request using semaphores | |
3959 | * depends on the saturated state of the engine. We only compute | |
3960 | * this during HW submission of the request, and we need this | |
3961 | * state to be globally applied to all requests being submitted | |
3962 | * to this engine. Virtual engines encompass more than one physical | |
3963 | * engine and so we cannot accurately tell in advance if one of those | |
3964 | * engines is already saturated and so cannot afford to use a semaphore | |
3965 | * and be pessimized in priority for doing so -- if we are the only | |
3966 | * context using semaphores after all other clients have stopped, we | |
3967 | * will be starved on the saturated system. Such a global switch for | |
3968 | * semaphores is less than ideal, but alas is the current compromise. | |
3969 | */ | |
3970 | ve->base.saturated = ALL_ENGINES; | |
3971 | ||
6d06779e CW |
3972 | snprintf(ve->base.name, sizeof(ve->base.name), "virtual"); |
3973 | ||
6d06779e CW |
3974 | intel_engine_init_execlists(&ve->base); |
3975 | ||
3e28d371 MB |
3976 | ve->base.sched_engine = i915_sched_engine_create(ENGINE_VIRTUAL); |
3977 | if (!ve->base.sched_engine) { | |
3978 | err = -ENOMEM; | |
3979 | goto err_put; | |
3980 | } | |
22916bad | 3981 | ve->base.sched_engine->private_data = &ve->base; |
3e28d371 | 3982 | |
6d06779e CW |
3983 | ve->base.cops = &virtual_context_ops; |
3984 | ve->base.request_alloc = execlists_request_alloc; | |
3985 | ||
3f623e06 | 3986 | ve->base.sched_engine->schedule = i915_schedule; |
71ed6011 | 3987 | ve->base.sched_engine->kick_backend = kick_execlists; |
6d06779e CW |
3988 | ve->base.submit_request = virtual_submit_request; |
3989 | ||
422d7df4 | 3990 | INIT_LIST_HEAD(virtual_queue(ve)); |
22916bad | 3991 | tasklet_setup(&ve->base.sched_engine->tasklet, virtual_submission_tasklet); |
6d06779e | 3992 | |
e6ba7648 | 3993 | intel_context_init(&ve->context, &ve->base); |
6d06779e | 3994 | |
b3786b29 CW |
3995 | ve->base.breadcrumbs = intel_breadcrumbs_create(NULL); |
3996 | if (!ve->base.breadcrumbs) { | |
3997 | err = -ENOMEM; | |
3998 | goto err_put; | |
3999 | } | |
4000 | ||
6d06779e CW |
4001 | for (n = 0; n < count; n++) { |
4002 | struct intel_engine_cs *sibling = siblings[n]; | |
4003 | ||
4004 | GEM_BUG_ON(!is_power_of_2(sibling->mask)); | |
4005 | if (sibling->mask & ve->base.mask) { | |
a10234fd TU |
4006 | drm_dbg(&i915->drm, |
4007 | "duplicate %s entry in load balancer\n", | |
4008 | sibling->name); | |
6d06779e CW |
4009 | err = -EINVAL; |
4010 | goto err_put; | |
4011 | } | |
4012 | ||
4013 | /* | |
4014 | * The virtual engine implementation is tightly coupled to | |
4015 | * the execlists backend -- we push requests directly | |
4016 | * into a tree inside each physical engine. We could support | |
4017 | * layering if we handle cloning of the requests and | |
4018 | * submitting a copy into each backend. | |
4019 | */ | |
22916bad | 4020 | if (sibling->sched_engine->tasklet.callback != |
6d06779e CW |
4021 | execlists_submission_tasklet) { |
4022 | err = -ENODEV; | |
4023 | goto err_put; | |
4024 | } | |
4025 | ||
4026 | GEM_BUG_ON(RB_EMPTY_NODE(&ve->nodes[sibling->id].rb)); | |
4027 | RB_CLEAR_NODE(&ve->nodes[sibling->id].rb); | |
4028 | ||
4029 | ve->siblings[ve->num_siblings++] = sibling; | |
4030 | ve->base.mask |= sibling->mask; | |
4f3059dc | 4031 | ve->base.logical_mask |= sibling->logical_mask; |
6d06779e CW |
4032 | |
4033 | /* | |
4034 | * All physical engines must be compatible for their emission | |
4035 | * functions (as we build the instructions during request | |
4036 | * construction and do not alter them before submission | |
4037 | * on the physical engine). We use the engine class as a guide | |
4038 | * here, although that could be refined. | |
4039 | */ | |
4040 | if (ve->base.class != OTHER_CLASS) { | |
4041 | if (ve->base.class != sibling->class) { | |
a10234fd TU |
4042 | drm_dbg(&i915->drm, |
4043 | "invalid mixing of engine class, sibling %d, already %d\n", | |
4044 | sibling->class, ve->base.class); | |
6d06779e CW |
4045 | err = -EINVAL; |
4046 | goto err_put; | |
4047 | } | |
4048 | continue; | |
4049 | } | |
4050 | ||
4051 | ve->base.class = sibling->class; | |
4052 | ve->base.uabi_class = sibling->uabi_class; | |
4053 | snprintf(ve->base.name, sizeof(ve->base.name), | |
4054 | "v%dx%d", ve->base.class, count); | |
4055 | ve->base.context_size = sibling->context_size; | |
4056 | ||
d1cee2d3 MB |
4057 | ve->base.add_active_request = sibling->add_active_request; |
4058 | ve->base.remove_active_request = sibling->remove_active_request; | |
6d06779e CW |
4059 | ve->base.emit_bb_start = sibling->emit_bb_start; |
4060 | ve->base.emit_flush = sibling->emit_flush; | |
4061 | ve->base.emit_init_breadcrumb = sibling->emit_init_breadcrumb; | |
4062 | ve->base.emit_fini_breadcrumb = sibling->emit_fini_breadcrumb; | |
4063 | ve->base.emit_fini_breadcrumb_dw = | |
4064 | sibling->emit_fini_breadcrumb_dw; | |
09975b86 CW |
4065 | |
4066 | ve->base.flags = sibling->flags; | |
6d06779e CW |
4067 | } |
4068 | ||
09975b86 CW |
4069 | ve->base.flags |= I915_ENGINE_IS_VIRTUAL; |
4070 | ||
110f9efa | 4071 | virtual_engine_initial_hint(ve); |
6d06779e CW |
4072 | return &ve->context; |
4073 | ||
4074 | err_put: | |
4075 | intel_context_put(&ve->context); | |
4076 | return ERR_PTR(err); | |
4077 | } | |
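
As a hedged usage sketch (not code from the driver): the virtual-engine selftests create a context over a set of same-class siblings and submit to it directly, along these lines. The helper name example_submit_on_virtual(), the sibling0/sibling1 parameters, and the trimmed error handling are illustrative assumptions; real userspace reaches this path through the context-engines uAPI instead.

    /* Illustrative only: assumes two valid physical engines of the same class. */
    static int example_submit_on_virtual(struct intel_engine_cs *sibling0,
                                         struct intel_engine_cs *sibling1)
    {
            struct intel_engine_cs *siblings[] = { sibling0, sibling1 };
            struct intel_context *ce;
            struct i915_request *rq;

            ce = execlists_create_virtual(siblings, ARRAY_SIZE(siblings), 0);
            if (IS_ERR(ce))
                    return PTR_ERR(ce);

            rq = intel_context_create_request(ce);
            if (IS_ERR(rq)) {
                    intel_context_put(ce);
                    return PTR_ERR(rq);
            }

            /* The request may execute on whichever sibling becomes free first. */
            i915_request_add(rq);

            intel_context_put(ce);
            return 0;
    }

Note that every sibling must be backed by the execlists submission tasklet; mixing in an engine driven by a different backend fails the tasklet-callback check above with -ENODEV.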
4078 | ||
0212bdef CW |
4079 | void intel_execlists_show_requests(struct intel_engine_cs *engine, |
4080 | struct drm_printer *m, | |
4081 | void (*show_request)(struct drm_printer *m, | |
1f0e785a | 4082 | const struct i915_request *rq, |
562675d0 CW |
4083 | const char *prefix, |
4084 | int indent), | |
0212bdef CW |
4085 | unsigned int max) |
4086 | { | |
4087 | const struct intel_engine_execlists *execlists = &engine->execlists; | |
349a2bc5 | 4088 | struct i915_sched_engine *sched_engine = engine->sched_engine; |
0212bdef CW |
4089 | struct i915_request *rq, *last; |
4090 | unsigned long flags; | |
4091 | unsigned int count; | |
4092 | struct rb_node *rb; | |
4093 | ||
349a2bc5 | 4094 | spin_lock_irqsave(&sched_engine->lock, flags); |
0212bdef CW |
4095 | |
4096 | last = NULL; | |
4097 | count = 0; | |
349a2bc5 | 4098 | list_for_each_entry(rq, &sched_engine->requests, sched.link) { |
0212bdef | 4099 | if (count++ < max - 1) |
562675d0 | 4100 | show_request(m, rq, "\t\t", 0); |
0212bdef CW |
4101 | else |
4102 | last = rq; | |
4103 | } | |
4104 | if (last) { | |
4105 | if (count > max) { | |
4106 | drm_printf(m, | |
4107 | "\t\t...skipping %d executing requests...\n", | |
4108 | count - max); | |
4109 | } | |
562675d0 | 4110 | show_request(m, last, "\t\t", 0); |
0212bdef CW |
4111 | } |
4112 | ||
3e28d371 | 4113 | if (sched_engine->queue_priority_hint != INT_MIN) |
4d97cbe0 | 4114 | drm_printf(m, "\t\tQueue priority hint: %d\n", |
3e28d371 | 4115 | READ_ONCE(sched_engine->queue_priority_hint)); |
81dcef4c CW |
4116 | |
4117 | last = NULL; | |
4118 | count = 0; | |
3e28d371 | 4119 | for (rb = rb_first_cached(&sched_engine->queue); rb; rb = rb_next(rb)) { |
0212bdef | 4120 | struct i915_priolist *p = rb_entry(rb, typeof(*p), node); |
0212bdef | 4121 | |
2867ff6c | 4122 | priolist_for_each_request(rq, p) { |
0212bdef | 4123 | if (count++ < max - 1) |
562675d0 | 4124 | show_request(m, rq, "\t\t", 0); |
0212bdef CW |
4125 | else |
4126 | last = rq; | |
4127 | } | |
4128 | } | |
4129 | if (last) { | |
4130 | if (count > max) { | |
4131 | drm_printf(m, | |
4132 | "\t\t...skipping %d queued requests...\n", | |
4133 | count - max); | |
4134 | } | |
562675d0 | 4135 | show_request(m, last, "\t\t", 0); |
0212bdef CW |
4136 | } |
4137 | ||
6d06779e CW |
4138 | last = NULL; |
4139 | count = 0; | |
4140 | for (rb = rb_first_cached(&execlists->virtual); rb; rb = rb_next(rb)) { | |
4141 | struct virtual_engine *ve = | |
4142 | rb_entry(rb, typeof(*ve), nodes[engine->id].rb); | |
4143 | struct i915_request *rq = READ_ONCE(ve->request); | |
4144 | ||
4145 | if (rq) { | |
4146 | if (count++ < max - 1) | |
562675d0 | 4147 | show_request(m, rq, "\t\t", 0); |
6d06779e CW |
4148 | else |
4149 | last = rq; | |
4150 | } | |
4151 | } | |
4152 | if (last) { | |
4153 | if (count > max) { | |
4154 | drm_printf(m, | |
4155 | "\t\t...skipping %d virtual requests...\n", | |
4156 | count - max); | |
4157 | } | |
562675d0 | 4158 | show_request(m, last, "\t\t", 0); |
6d06779e CW |
4159 | } |
4160 | ||
349a2bc5 | 4161 | spin_unlock_irqrestore(&sched_engine->lock, flags); |
0212bdef CW |
4162 | } |
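
The show_request callback is supplied by the caller, so the same walker can feed debugfs, error capture, or a selftest log. A minimal hypothetical callback with the expected signature, plus an invocation, might look like the following; example_show_request, engine and p are placeholders, and the fields printed are an arbitrary choice:

    /* Hypothetical printer; matches the show_request() signature above. */
    static void example_show_request(struct drm_printer *m,
                                     const struct i915_request *rq,
                                     const char *prefix,
                                     int indent)
    {
            drm_printf(m, "%s%*s%llx:%lld prio=%d\n",
                       prefix, indent, "",
                       rq->fence.context, rq->fence.seqno,
                       rq->sched.attr.priority);
    }

    /* Print at most 8 requests from each of the three groups walked above. */
    intel_execlists_show_requests(engine, &p, example_show_request, 8);

The budget applies per group (executing, queued, virtual), with the count reset between them; the driver's own engine dump passes its generic request printer (i915_request_show()) here in the same way.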
4163 | ||
a4be3dca JH |
4164 | void intel_execlists_dump_active_requests(struct intel_engine_cs *engine, |
4165 | struct i915_request *hung_rq, | |
4166 | struct drm_printer *m) | |
4167 | { | |
4168 | unsigned long flags; | |
4169 | ||
4170 | spin_lock_irqsave(&engine->sched_engine->lock, flags); | |
4171 | ||
4172 | intel_engine_dump_active_requests(&engine->sched_engine->requests, hung_rq, m); | |
4173 | ||
25746a3f | 4174 | drm_printf(m, "\tOn hold?: %zu\n", |
924fb3ec | 4175 | list_count_nodes(&engine->sched_engine->hold)); |
a4be3dca JH |
4176 | |
4177 | spin_unlock_irqrestore(&engine->sched_engine->lock, flags); | |
4178 | } | |
4179 | ||
2c66555e | 4180 | #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST) |
70a2b431 | 4181 | #include "selftest_execlists.c" |
2c66555e | 4182 | #endif |