/*
 * Copyright © 2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *    Michel Thierry <michel.thierry@intel.com>
 *    Thomas Daniel <thomas.daniel@intel.com>
 *    Oscar Mateo <oscar.mateo@intel.com>
 *
 */
/**
 * DOC: Logical Rings, Logical Ring Contexts and Execlists
 *
 * GEN8 brings an expansion of the HW contexts: "Logical Ring Contexts".
 * These expanded contexts enable a number of new abilities, especially
 * "Execlists" (also implemented in this file).
 *
 * One of the main differences from the legacy HW contexts is that logical
 * ring contexts incorporate many more things in the context's state, like
 * PDPs or ringbuffer control registers:
 *
 * The reason why PDPs are included in the context is straightforward: as
 * PPGTTs (per-process GTTs) are actually per-context, having the PDPs
 * contained there means you don't need to do a ppgtt->switch_mm yourself;
 * instead, the GPU will do it for you on the context switch.
 *
 * But what about the ringbuffer control registers (head, tail, etc..)?
 * Shouldn't we need just one set of those per engine command streamer? This
 * is where the name "Logical Rings" starts to make sense: by virtualizing the
 * rings, the engine cs shifts to a new "ring buffer" with every context
 * switch. When you want to submit a workload to the GPU you: A) choose your
 * context, B) find its appropriate virtualized ring, C) write commands to it
 * and then, finally, D) tell the GPU to switch to that context.
 *
 * Instead of the legacy MI_SET_CONTEXT, the way you tell the GPU to switch
 * to a context is via a context execution list, ergo "Execlists".
 *
 * Regarding the creation of contexts, we have:
 *
 * - One global default context.
 * - One local default context for each opened fd.
 * - One local extra context for each context create ioctl call.
 *
 * Now that ringbuffers belong per-context (and not per-engine, like before)
 * and that contexts are uniquely tied to a given engine (and not reusable,
 * like before) we need:
 *
 * - One ringbuffer per-engine inside each context.
 * - One backing object per-engine inside each context.
 *
 * The global default context starts its life with these new objects fully
 * allocated and populated. The local default context for each opened fd is
 * more complex, because we don't know at creation time which engine is going
 * to use it. To handle this, we have implemented a deferred creation of LR
 * contexts:
 *
 * The local context starts its life as a hollow or blank holder that only
 * gets populated for a given engine once we receive an execbuffer. If later
 * on we receive another execbuffer ioctl for the same context but a different
 * engine, we allocate/populate a new ringbuffer and context backing object
 * and so on.
 *
 * Finally, regarding local contexts created using the ioctl call: as they are
 * only allowed with the render ring, we can allocate & populate them right
 * away (no need to defer anything, at least for now).
 *
 * Execlists implementation:
 * Execlists are the new method by which, on gen8+ hardware, workloads are
 * submitted for execution (as opposed to the legacy, ringbuffer-based, method).
 * This method works as follows:
 *
 * When a request is committed, its commands (the BB start and any leading or
 * trailing commands, like the seqno breadcrumbs) are placed in the ringbuffer
 * for the appropriate context. The tail pointer in the hardware context is not
 * updated at this time, but instead, kept by the driver in the ringbuffer
 * structure. A structure representing this request is added to a request queue
 * for the appropriate engine: this structure contains a copy of the context's
 * tail after the request was written to the ring buffer and a pointer to the
 * context itself.
 *
 * If the engine's request queue was empty before the request was added, the
 * queue is processed immediately. Otherwise the queue will be processed during
 * a context switch interrupt. In any case, elements on the queue will get sent
 * (in pairs) to the GPU's ExecLists Submit Port (ELSP, for short) with a
 * globally unique 20-bit submission ID.
 *
 * When execution of a request completes, the GPU updates the context status
 * buffer with a context complete event and generates a context switch interrupt.
 * During the interrupt handling, the driver examines the events in the buffer:
 * for each context complete event, if the announced ID matches that on the head
 * of the request queue, then that request is retired and removed from the queue.
 *
 * After processing, if any requests were retired and the queue is not empty
 * then a new execution list can be submitted. The two requests at the front of
 * the queue are next to be submitted but since a context may not occur twice in
 * an execution list, if subsequent requests have the same ID as the first then
 * the two requests must be combined. This is done simply by discarding requests
 * at the head of the queue until either only one request is left (in which case
 * we use a NULL second context) or the first two requests have unique IDs.
 *
 * By always executing the first two requests in the queue the driver ensures
 * that the GPU is kept as busy as possible. In the case where a single context
 * completes but a second context is still executing, the request for this second
 * context will be at the head of the queue when we remove the first one. This
 * request will then be resubmitted along with a new request for a different
 * context, which will cause the hardware to continue executing the second request
 * and queue the new request (the GPU detects the condition of a context getting
 * preempted with the same context and optimizes the context switch flow by not
 * doing preemption, but just sampling the new tail pointer).
 *
 */
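/*
 * Editor's illustrative sketch (not driver code): the queue-to-ELSP pairing
 * rule described above, reduced to plain C. The request type and the helper
 * name are hypothetical; the real logic lives in execlists_dequeue() below.
 *
 *	struct example_req { int ctx_id; };
 *
 *	// Pick up to two entries for an execution list: the head of the
 *	// queue (merged with any immediately following requests from the
 *	// same context), plus the first request from a different context.
 *	static void pick_elsp_pair(struct example_req **queue, int count,
 *				   struct example_req **slot0,
 *				   struct example_req **slot1)
 *	{
 *		int i;
 *
 *		*slot0 = count ? queue[0] : NULL;
 *		*slot1 = NULL;
 *		for (i = 1; *slot0 && i < count; i++) {
 *			if (queue[i]->ctx_id != (*slot0)->ctx_id) {
 *				*slot1 = queue[i];
 *				break;
 *			}
 *			*slot0 = queue[i];	// same context: keep only the last
 *		}
 *	}
 */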
#include <linux/interrupt.h>

#include <drm/drmP.h>
#include <drm/i915_drm.h>
#include "i915_drv.h"
#include "i915_gem_render_state.h"
#include "intel_lrc_reg.h"
#include "intel_mocs.h"
#define RING_EXECLIST_QFULL		(1 << 0x2)
#define RING_EXECLIST1_VALID		(1 << 0x3)
#define RING_EXECLIST0_VALID		(1 << 0x4)
#define RING_EXECLIST_ACTIVE_STATUS	(3 << 0xE)
#define RING_EXECLIST1_ACTIVE		(1 << 0x11)
#define RING_EXECLIST0_ACTIVE		(1 << 0x12)

#define GEN8_CTX_STATUS_IDLE_ACTIVE	(1 << 0)
#define GEN8_CTX_STATUS_PREEMPTED	(1 << 1)
#define GEN8_CTX_STATUS_ELEMENT_SWITCH	(1 << 2)
#define GEN8_CTX_STATUS_ACTIVE_IDLE	(1 << 3)
#define GEN8_CTX_STATUS_COMPLETE	(1 << 4)
#define GEN8_CTX_STATUS_LITE_RESTORE	(1 << 15)

#define GEN8_CTX_STATUS_COMPLETED_MASK \
	 (GEN8_CTX_STATUS_COMPLETE | GEN8_CTX_STATUS_PREEMPTED)

/* Typical size of the average request (2 pipecontrols and a MI_BB) */
#define EXECLISTS_REQUEST_SIZE 64 /* bytes */
#define WA_TAIL_DWORDS 2
#define WA_TAIL_BYTES (sizeof(u32) * WA_TAIL_DWORDS)
#define PREEMPT_ID 0x1
static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
					    struct intel_engine_cs *engine);
static void execlists_init_reg_state(u32 *reg_state,
				     struct i915_gem_context *ctx,
				     struct intel_engine_cs *engine,
				     struct intel_ring *ring);
/**
 * intel_lr_context_descriptor_update() - calculate & cache the descriptor
 * for a pinned context
 * @ctx: Context to work on
 * @engine: Engine the descriptor will be used with
 *
 * The context descriptor encodes various attributes of a context,
 * including its GTT address and some flags. Because it's fairly
 * expensive to calculate, we'll just do it once and cache the result,
 * which remains valid until the context is unpinned.
 *
 * This is what a descriptor looks like, from LSB to MSB::
 *
 *      bits  0-11:    flags, GEN8_CTX_* (cached in ctx->desc_template)
 *      bits 12-31:    LRCA, GTT address of (the HWSP of) this context
 *      bits 32-52:    ctx ID, a globally unique tag
 *      bits 53-54:    mbz, reserved for use by hardware
 *      bits 55-63:    group ID, currently unused and set to 0
 */
static void
intel_lr_context_descriptor_update(struct i915_gem_context *ctx,
				   struct intel_engine_cs *engine)
{
	struct intel_context *ce = &ctx->engine[engine->id];
	u64 desc;

	BUILD_BUG_ON(MAX_CONTEXT_HW_ID > (1 << GEN8_CTX_ID_WIDTH));

	desc = ctx->desc_template;				/* bits  0-11 */
	desc |= i915_ggtt_offset(ce->state) + LRC_HEADER_PAGES * PAGE_SIZE;
								/* bits 12-31 */
	desc |= (u64)ctx->hw_id << GEN8_CTX_ID_SHIFT;		/* bits 32-52 */

	ce->lrc_desc = desc;
}
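/*
 * Editor's illustrative sketch (not driver code): a worked example of the
 * descriptor packing performed above. The concrete values are invented;
 * only the field positions follow the layout documented in the kernel-doc,
 * assuming GEN8_CTX_ID_SHIFT is 32 and the flags occupy bits 0-11.
 *
 *	u64 example_desc(u64 flags, u64 lrca, u64 hw_id)
 *	{
 *		u64 desc;
 *
 *		desc  = flags & GENMASK_ULL(11, 0);	// bits  0-11
 *		desc |= lrca & GENMASK_ULL(31, 12);	// bits 12-31 (page aligned)
 *		desc |= hw_id << 32;			// bits 32-52
 *		return desc;
 *	}
 *
 *	// e.g. flags = 0x019, lrca = 0x0001e000, hw_id = 5
 *	//  -> desc = 0x000000050001e019
 */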
209 static struct i915_priolist *
210 lookup_priolist(struct intel_engine_cs *engine,
211 struct i915_priotree *pt,
214 struct intel_engine_execlists * const execlists = &engine->execlists;
215 struct i915_priolist *p;
216 struct rb_node **parent, *rb;
219 if (unlikely(execlists->no_priolist))
220 prio = I915_PRIORITY_NORMAL;
223 /* most positive priority is scheduled first, equal priorities fifo */
225 parent = &execlists->queue.rb_node;
228 p = rb_entry(rb, typeof(*p), node);
229 if (prio > p->priority) {
230 parent = &rb->rb_left;
231 } else if (prio < p->priority) {
232 parent = &rb->rb_right;
239 if (prio == I915_PRIORITY_NORMAL) {
240 p = &execlists->default_priolist;
242 p = kmem_cache_alloc(engine->i915->priorities, GFP_ATOMIC);
243 /* Convert an allocation failure to a priority bump */
245 prio = I915_PRIORITY_NORMAL; /* recurses just once */
247 /* To maintain ordering with all rendering, after an
248 * allocation failure we have to disable all scheduling.
249 * Requests will then be executed in fifo, and schedule
250 * will ensure that dependencies are emitted in fifo.
			 * There will still be some reordering with existing
			 * requests, so if userspace lied about their
			 * dependencies that reordering may be visible.
			 */
			execlists->no_priolist = true;
261 INIT_LIST_HEAD(&p->requests);
262 rb_link_node(&p->node, rb, parent);
263 rb_insert_color(&p->node, &execlists->queue);
266 execlists->first = &p->node;
268 return ptr_pack_bits(p, first, 1);
271 static void unwind_wa_tail(struct drm_i915_gem_request *rq)
273 rq->tail = intel_ring_wrap(rq->ring, rq->wa_tail - WA_TAIL_BYTES);
274 assert_ring_tail_valid(rq->ring, rq->tail);
277 static void __unwind_incomplete_requests(struct intel_engine_cs *engine)
279 struct drm_i915_gem_request *rq, *rn;
280 struct i915_priolist *uninitialized_var(p);
281 int last_prio = I915_PRIORITY_INVALID;
283 lockdep_assert_held(&engine->timeline->lock);
285 list_for_each_entry_safe_reverse(rq, rn,
286 &engine->timeline->requests,
288 if (i915_gem_request_completed(rq))
291 __i915_gem_request_unsubmit(rq);
294 GEM_BUG_ON(rq->priotree.priority == I915_PRIORITY_INVALID);
295 if (rq->priotree.priority != last_prio) {
296 p = lookup_priolist(engine,
298 rq->priotree.priority);
299 p = ptr_mask_bits(p, 1);
301 last_prio = rq->priotree.priority;
304 list_add(&rq->priotree.link, &p->requests);
309 execlists_unwind_incomplete_requests(struct intel_engine_execlists *execlists)
311 struct intel_engine_cs *engine =
312 container_of(execlists, typeof(*engine), execlists);
314 spin_lock_irq(&engine->timeline->lock);
315 __unwind_incomplete_requests(engine);
316 spin_unlock_irq(&engine->timeline->lock);
320 execlists_context_status_change(struct drm_i915_gem_request *rq,
321 unsigned long status)
	 * Only used when GVT-g is enabled now. When GVT-g is disabled,
	 * the compiler should eliminate this function as dead-code.
	 */
327 if (!IS_ENABLED(CONFIG_DRM_I915_GVT))
330 atomic_notifier_call_chain(&rq->engine->context_status_notifier,
335 execlists_context_schedule_in(struct drm_i915_gem_request *rq)
337 execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_IN);
338 intel_engine_context_in(rq->engine);
342 execlists_context_schedule_out(struct drm_i915_gem_request *rq)
344 intel_engine_context_out(rq->engine);
345 execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_OUT);
349 execlists_update_context_pdps(struct i915_hw_ppgtt *ppgtt, u32 *reg_state)
351 ASSIGN_CTX_PDP(ppgtt, reg_state, 3);
352 ASSIGN_CTX_PDP(ppgtt, reg_state, 2);
353 ASSIGN_CTX_PDP(ppgtt, reg_state, 1);
354 ASSIGN_CTX_PDP(ppgtt, reg_state, 0);
357 static u64 execlists_update_context(struct drm_i915_gem_request *rq)
359 struct intel_context *ce = &rq->ctx->engine[rq->engine->id];
360 struct i915_hw_ppgtt *ppgtt =
361 rq->ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
362 u32 *reg_state = ce->lrc_reg_state;
364 reg_state[CTX_RING_TAIL+1] = intel_ring_set_tail(rq->ring, rq->tail);
	/* True 32b PPGTT with dynamic page allocation: update PDP
	 * registers and point the unallocated PDPs to scratch page.
	 * PML4 is allocated during ppgtt init, so this is not needed
	 * in 48-bit mode.
	 */
371 if (ppgtt && !i915_vm_is_48bit(&ppgtt->base))
372 execlists_update_context_pdps(ppgtt, reg_state);
377 static inline void elsp_write(u64 desc, u32 __iomem *elsp)
379 writel(upper_32_bits(desc), elsp);
380 writel(lower_32_bits(desc), elsp);
383 static void execlists_submit_ports(struct intel_engine_cs *engine)
385 struct execlist_port *port = engine->execlists.port;
388 for (n = execlists_num_ports(&engine->execlists); n--; ) {
389 struct drm_i915_gem_request *rq;
393 rq = port_unpack(&port[n], &count);
395 GEM_BUG_ON(count > !n);
397 execlists_context_schedule_in(rq);
398 port_set(&port[n], port_pack(rq, count));
399 desc = execlists_update_context(rq);
400 GEM_DEBUG_EXEC(port[n].context_id = upper_32_bits(desc));
402 GEM_TRACE("%s in[%d]: ctx=%d.%d, seqno=%x\n",
404 port[n].context_id, count,
411 elsp_write(desc, engine->execlists.elsp);
413 execlists_clear_active(&engine->execlists, EXECLISTS_ACTIVE_HWACK);
416 static bool ctx_single_port_submission(const struct i915_gem_context *ctx)
418 return (IS_ENABLED(CONFIG_DRM_I915_GVT) &&
419 i915_gem_context_force_single_submission(ctx));
422 static bool can_merge_ctx(const struct i915_gem_context *prev,
423 const struct i915_gem_context *next)
428 if (ctx_single_port_submission(prev))
434 static void port_assign(struct execlist_port *port,
435 struct drm_i915_gem_request *rq)
437 GEM_BUG_ON(rq == port_request(port));
439 if (port_isset(port))
440 i915_gem_request_put(port_request(port));
442 port_set(port, port_pack(i915_gem_request_get(rq), port_count(port)));
445 static void inject_preempt_context(struct intel_engine_cs *engine)
447 struct intel_context *ce =
448 &engine->i915->preempt_context->engine[engine->id];
451 GEM_BUG_ON(engine->i915->preempt_context->hw_id != PREEMPT_ID);
452 GEM_BUG_ON(!IS_ALIGNED(ce->ring->size, WA_TAIL_BYTES));
454 memset(ce->ring->vaddr + ce->ring->tail, 0, WA_TAIL_BYTES);
455 ce->ring->tail += WA_TAIL_BYTES;
456 ce->ring->tail &= (ce->ring->size - 1);
457 ce->lrc_reg_state[CTX_RING_TAIL+1] = ce->ring->tail;
459 GEM_BUG_ON((ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1] &
460 _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
461 CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT)) !=
462 _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
463 CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT));
465 GEM_TRACE("%s\n", engine->name);
466 for (n = execlists_num_ports(&engine->execlists); --n; )
467 elsp_write(0, engine->execlists.elsp);
469 elsp_write(ce->lrc_desc, engine->execlists.elsp);
470 execlists_clear_active(&engine->execlists, EXECLISTS_ACTIVE_HWACK);
473 static void execlists_dequeue(struct intel_engine_cs *engine)
475 struct intel_engine_execlists * const execlists = &engine->execlists;
476 struct execlist_port *port = execlists->port;
477 const struct execlist_port * const last_port =
478 &execlists->port[execlists->port_mask];
479 struct drm_i915_gem_request *last = port_request(port);
	/* Hardware submission is through 2 ports. Conceptually each port
	 * has a (RING_START, RING_HEAD, RING_TAIL) tuple. RING_START is
	 * static for a context, and unique to each, so we only execute
	 * requests belonging to a single context from each ring. RING_HEAD
	 * is maintained by the CS in the context image, it marks the place
	 * where it got up to last time, and through RING_TAIL we tell the CS
	 * where we want to execute up to this time.
	 *
	 * In this list the requests are in order of execution. Consecutive
	 * requests from the same context are adjacent in the ringbuffer. We
	 * can combine these requests into a single RING_TAIL update:
	 *
	 *              RING_HEAD...req1...req2
	 *                                    ^- RING_TAIL
	 * since to execute req2 the CS must first execute req1.
	 *
	 * Our goal then is to point each port to the end of a consecutive
	 * sequence of requests as being the most optimal (fewest wake ups
	 * and context switches) submission.
	 */
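	/*
	 * Editor's illustrative sketch (not driver code): the RING_TAIL
	 * coalescing described above in isolation. The type and helper are
	 * hypothetical; only the rule "a run of same-context requests needs
	 * just the last tail" is taken from the comment above.
	 *
	 *	struct example_req { int ctx_id; unsigned int tail; };
	 *
	 *	// Return the single tail to program for the run of requests
	 *	// sharing a context that starts at index i; *next is left at
	 *	// the first request of a different context (or at count).
	 *	static unsigned int coalesced_tail(const struct example_req *q,
	 *					   int count, int i, int *next)
	 *	{
	 *		unsigned int tail = q[i].tail;
	 *
	 *		while (i + 1 < count && q[i + 1].ctx_id == q[i].ctx_id)
	 *			tail = q[++i].tail;	// req1...req2 -> req2's tail
	 *		*next = i + 1;
	 *		return tail;
	 *	}
	 */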
504 spin_lock_irq(&engine->timeline->lock);
505 rb = execlists->first;
506 GEM_BUG_ON(rb_first(&execlists->queue) != rb);
512 * Don't resubmit or switch until all outstanding
513 * preemptions (lite-restore) are seen. Then we
514 * know the next preemption status we see corresponds
515 * to this ELSP update.
517 GEM_BUG_ON(!port_count(&port[0]));
518 if (port_count(&port[0]) > 1)
522 * If we write to ELSP a second time before the HW has had
523 * a chance to respond to the previous write, we can confuse
524 * the HW and hit "undefined behaviour". After writing to ELSP,
525 * we must then wait until we see a context-switch event from
526 * the HW to indicate that it has had a chance to respond.
528 if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_HWACK))
531 if (HAS_LOGICAL_RING_PREEMPTION(engine->i915) &&
532 rb_entry(rb, struct i915_priolist, node)->priority >
533 max(last->priotree.priority, 0)) {
535 * Switch to our empty preempt context so
536 * the state of the GPU is known (idle).
538 inject_preempt_context(engine);
539 execlists_set_active(execlists,
540 EXECLISTS_ACTIVE_PREEMPT);
544 * In theory, we could coalesce more requests onto
545 * the second port (the first port is active, with
546 * no preemptions pending). However, that means we
547 * then have to deal with the possible lite-restore
548 * of the second port (as we submit the ELSP, there
549 * may be a context-switch) but also we may complete
550 * the resubmission before the context-switch. Ergo,
551 * coalescing onto the second port will cause a
552 * preemption event, but we cannot predict whether
553 * that will affect port[0] or port[1].
555 * If the second port is already active, we can wait
556 * until the next context-switch before contemplating
557 * new requests. The GPU will be busy and we should be
558 * able to resubmit the new ELSP before it idles,
559 * avoiding pipeline bubbles (momentary pauses where
560 * the driver is unable to keep up the supply of new
563 if (port_count(&port[1]))
566 /* WaIdleLiteRestore:bdw,skl
567 * Apply the wa NOOPs to prevent
568 * ring:HEAD == req:TAIL as we resubmit the
569 * request. See gen8_emit_breadcrumb() for
570 * where we prepare the padding after the
571 * end of the request.
573 last->tail = last->wa_tail;
578 struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
579 struct drm_i915_gem_request *rq, *rn;
581 list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
583 * Can we combine this request with the current port?
584 * It has to be the same context/ringbuffer and not
585 * have any exceptions (e.g. GVT saying never to
588 * If we can combine the requests, we can execute both
589 * by updating the RING_TAIL to point to the end of the
590 * second request, and so we never need to tell the
591 * hardware about the first.
593 if (last && !can_merge_ctx(rq->ctx, last->ctx)) {
595 * If we are on the second port and cannot
596 * combine this request with the last, then we
599 if (port == last_port) {
600 __list_del_many(&p->requests,
606 * If GVT overrides us we only ever submit
607 * port[0], leaving port[1] empty. Note that we
608 * also have to be careful that we don't queue
609 * the same context (even though a different
610 * request) to the second port.
612 if (ctx_single_port_submission(last->ctx) ||
613 ctx_single_port_submission(rq->ctx)) {
614 __list_del_many(&p->requests,
619 GEM_BUG_ON(last->ctx == rq->ctx);
622 port_assign(port, last);
625 GEM_BUG_ON(port_isset(port));
628 INIT_LIST_HEAD(&rq->priotree.link);
629 __i915_gem_request_submit(rq);
630 trace_i915_gem_request_in(rq, port_index(port, execlists));
636 rb_erase(&p->node, &execlists->queue);
637 INIT_LIST_HEAD(&p->requests);
638 if (p->priority != I915_PRIORITY_NORMAL)
639 kmem_cache_free(engine->i915->priorities, p);
642 execlists->first = rb;
644 port_assign(port, last);
646 spin_unlock_irq(&engine->timeline->lock);
649 execlists_set_active(execlists, EXECLISTS_ACTIVE_USER);
650 execlists_submit_ports(engine);
655 execlists_cancel_port_requests(struct intel_engine_execlists * const execlists)
657 struct execlist_port *port = execlists->port;
658 unsigned int num_ports = execlists_num_ports(execlists);
660 while (num_ports-- && port_isset(port)) {
661 struct drm_i915_gem_request *rq = port_request(port);
663 GEM_BUG_ON(!execlists->active);
664 intel_engine_context_out(rq->engine);
665 execlists_context_status_change(rq, INTEL_CONTEXT_SCHEDULE_PREEMPTED);
666 i915_gem_request_put(rq);
668 memset(port, 0, sizeof(*port));
673 static void execlists_cancel_requests(struct intel_engine_cs *engine)
675 struct intel_engine_execlists * const execlists = &engine->execlists;
676 struct drm_i915_gem_request *rq, *rn;
680 spin_lock_irqsave(&engine->timeline->lock, flags);
682 /* Cancel the requests on the HW and clear the ELSP tracker. */
683 execlists_cancel_port_requests(execlists);
685 /* Mark all executing requests as skipped. */
686 list_for_each_entry(rq, &engine->timeline->requests, link) {
687 GEM_BUG_ON(!rq->global_seqno);
688 if (!i915_gem_request_completed(rq))
689 dma_fence_set_error(&rq->fence, -EIO);
692 /* Flush the queued requests to the timeline list (for retiring). */
693 rb = execlists->first;
695 struct i915_priolist *p = rb_entry(rb, typeof(*p), node);
697 list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
698 INIT_LIST_HEAD(&rq->priotree.link);
700 dma_fence_set_error(&rq->fence, -EIO);
701 __i915_gem_request_submit(rq);
705 rb_erase(&p->node, &execlists->queue);
706 INIT_LIST_HEAD(&p->requests);
707 if (p->priority != I915_PRIORITY_NORMAL)
708 kmem_cache_free(engine->i915->priorities, p);
711 /* Remaining _unready_ requests will be nop'ed when submitted */
714 execlists->queue = RB_ROOT;
715 execlists->first = NULL;
716 GEM_BUG_ON(port_isset(execlists->port));
719 * The port is checked prior to scheduling a tasklet, but
720 * just in case we have suspended the tasklet to do the
721 * wedging make sure that when it wakes, it decides there
722 * is no work to do by clearing the irq_posted bit.
724 clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
726 spin_unlock_irqrestore(&engine->timeline->lock, flags);
730 * Check the unread Context Status Buffers and manage the submission of new
731 * contexts to the ELSP accordingly.
733 static void execlists_submission_tasklet(unsigned long data)
735 struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
736 struct intel_engine_execlists * const execlists = &engine->execlists;
737 struct execlist_port * const port = execlists->port;
738 struct drm_i915_private *dev_priv = engine->i915;
741 /* We can skip acquiring intel_runtime_pm_get() here as it was taken
742 * on our behalf by the request (see i915_gem_mark_busy()) and it will
743 * not be relinquished until the device is idle (see
744 * i915_gem_idle_work_handler()). As a precaution, we make sure
745 * that all ELSP are drained i.e. we have processed the CSB,
746 * before allowing ourselves to idle and calling intel_runtime_pm_put().
748 GEM_BUG_ON(!dev_priv->gt.awake);
750 /* Prefer doing test_and_clear_bit() as a two stage operation to avoid
751 * imposing the cost of a locked atomic transaction when submitting a
752 * new request (outside of the context-switch interrupt).
754 while (test_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted)) {
755 /* The HWSP contains a (cacheable) mirror of the CSB */
757 &engine->status_page.page_addr[I915_HWS_CSB_BUF0_INDEX];
758 unsigned int head, tail;
760 if (unlikely(execlists->csb_use_mmio)) {
761 buf = (u32 * __force)
762 (dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_BUF_LO(engine, 0)));
763 execlists->csb_head = -1; /* force mmio read of CSB ptrs */
766 /* The write will be ordered by the uncached read (itself
767 * a memory barrier), so we do not need another in the form
768 * of a locked instruction. The race between the interrupt
769 * handler and the split test/clear is harmless as we order
770 * our clear before the CSB read. If the interrupt arrived
771 * first between the test and the clear, we read the updated
772 * CSB and clear the bit. If the interrupt arrives as we read
773 * the CSB or later (i.e. after we had cleared the bit) the bit
774 * is set and we do a new loop.
776 __clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
777 if (unlikely(execlists->csb_head == -1)) { /* following a reset */
779 intel_uncore_forcewake_get(dev_priv,
780 execlists->fw_domains);
784 head = readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
785 tail = GEN8_CSB_WRITE_PTR(head);
786 head = GEN8_CSB_READ_PTR(head);
787 execlists->csb_head = head;
789 const int write_idx =
790 intel_hws_csb_write_index(dev_priv) -
791 I915_HWS_CSB_BUF0_INDEX;
793 head = execlists->csb_head;
794 tail = READ_ONCE(buf[write_idx]);
796 GEM_TRACE("%s cs-irq head=%d [%d%s], tail=%d [%d%s]\n",
798 head, GEN8_CSB_READ_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?",
799 tail, GEN8_CSB_WRITE_PTR(readl(dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)))), fw ? "" : "?");
801 while (head != tail) {
802 struct drm_i915_gem_request *rq;
806 if (++head == GEN8_CSB_ENTRIES)
809 /* We are flying near dragons again.
811 * We hold a reference to the request in execlist_port[]
812 * but no more than that. We are operating in softirq
813 * context and so cannot hold any mutex or sleep. That
814 * prevents us stopping the requests we are processing
815 * in port[] from being retired simultaneously (the
816 * breadcrumb will be complete before we see the
817 * context-switch). As we only hold the reference to the
818 * request, any pointer chasing underneath the request
819 * is subject to a potential use-after-free. Thus we
820 * store all of the bookkeeping within port[] as
821 * required, and avoid using unguarded pointers beneath
			 * request itself. The same applies to the atomic
			 * status notifier.
			 */
826 status = READ_ONCE(buf[2 * head]); /* maybe mmio! */
827 GEM_TRACE("%s csb[%d]: status=0x%08x:0x%08x, active=0x%x\n",
829 status, buf[2*head + 1],
832 if (status & (GEN8_CTX_STATUS_IDLE_ACTIVE |
833 GEN8_CTX_STATUS_PREEMPTED))
834 execlists_set_active(execlists,
835 EXECLISTS_ACTIVE_HWACK);
836 if (status & GEN8_CTX_STATUS_ACTIVE_IDLE)
837 execlists_clear_active(execlists,
838 EXECLISTS_ACTIVE_HWACK);
840 if (!(status & GEN8_CTX_STATUS_COMPLETED_MASK))
843 /* We should never get a COMPLETED | IDLE_ACTIVE! */
844 GEM_BUG_ON(status & GEN8_CTX_STATUS_IDLE_ACTIVE);
846 if (status & GEN8_CTX_STATUS_COMPLETE &&
847 buf[2*head + 1] == PREEMPT_ID) {
848 GEM_TRACE("%s preempt-idle\n", engine->name);
850 execlists_cancel_port_requests(execlists);
851 execlists_unwind_incomplete_requests(execlists);
853 GEM_BUG_ON(!execlists_is_active(execlists,
854 EXECLISTS_ACTIVE_PREEMPT));
855 execlists_clear_active(execlists,
856 EXECLISTS_ACTIVE_PREEMPT);
860 if (status & GEN8_CTX_STATUS_PREEMPTED &&
861 execlists_is_active(execlists,
862 EXECLISTS_ACTIVE_PREEMPT))
865 GEM_BUG_ON(!execlists_is_active(execlists,
866 EXECLISTS_ACTIVE_USER));
868 /* Check the context/desc id for this event matches */
869 GEM_DEBUG_BUG_ON(buf[2 * head + 1] != port->context_id);
871 rq = port_unpack(port, &count);
872 GEM_TRACE("%s out[0]: ctx=%d.%d, seqno=%x\n",
874 port->context_id, count,
875 rq ? rq->global_seqno : 0);
876 GEM_BUG_ON(count == 0);
878 GEM_BUG_ON(status & GEN8_CTX_STATUS_PREEMPTED);
879 GEM_BUG_ON(port_isset(&port[1]) &&
880 !(status & GEN8_CTX_STATUS_ELEMENT_SWITCH));
881 GEM_BUG_ON(!i915_gem_request_completed(rq));
882 execlists_context_schedule_out(rq);
883 trace_i915_gem_request_out(rq);
884 i915_gem_request_put(rq);
886 execlists_port_complete(execlists, port);
888 port_set(port, port_pack(rq, count));
891 /* After the final element, the hw should be idle */
892 GEM_BUG_ON(port_count(port) == 0 &&
893 !(status & GEN8_CTX_STATUS_ACTIVE_IDLE));
894 if (port_count(port) == 0)
895 execlists_clear_active(execlists,
896 EXECLISTS_ACTIVE_USER);
899 if (head != execlists->csb_head) {
900 execlists->csb_head = head;
901 writel(_MASKED_FIELD(GEN8_CSB_READ_PTR_MASK, head << 8),
902 dev_priv->regs + i915_mmio_reg_offset(RING_CONTEXT_STATUS_PTR(engine)));
906 if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
907 execlists_dequeue(engine);
910 intel_uncore_forcewake_put(dev_priv, execlists->fw_domains);
913 static void insert_request(struct intel_engine_cs *engine,
914 struct i915_priotree *pt,
917 struct i915_priolist *p = lookup_priolist(engine, pt, prio);
919 list_add_tail(&pt->link, &ptr_mask_bits(p, 1)->requests);
920 if (ptr_unmask_bits(p, 1))
921 tasklet_hi_schedule(&engine->execlists.tasklet);
924 static void execlists_submit_request(struct drm_i915_gem_request *request)
926 struct intel_engine_cs *engine = request->engine;
929 /* Will be called from irq-context when using foreign fences. */
930 spin_lock_irqsave(&engine->timeline->lock, flags);
932 insert_request(engine, &request->priotree, request->priotree.priority);
934 GEM_BUG_ON(!engine->execlists.first);
935 GEM_BUG_ON(list_empty(&request->priotree.link));
937 spin_unlock_irqrestore(&engine->timeline->lock, flags);
940 static struct drm_i915_gem_request *pt_to_request(struct i915_priotree *pt)
942 return container_of(pt, struct drm_i915_gem_request, priotree);
945 static struct intel_engine_cs *
946 pt_lock_engine(struct i915_priotree *pt, struct intel_engine_cs *locked)
948 struct intel_engine_cs *engine = pt_to_request(pt)->engine;
952 if (engine != locked) {
953 spin_unlock(&locked->timeline->lock);
954 spin_lock(&engine->timeline->lock);
960 static void execlists_schedule(struct drm_i915_gem_request *request, int prio)
962 struct intel_engine_cs *engine;
963 struct i915_dependency *dep, *p;
964 struct i915_dependency stack;
967 GEM_BUG_ON(prio == I915_PRIORITY_INVALID);
969 if (i915_gem_request_completed(request))
972 if (prio <= READ_ONCE(request->priotree.priority))
975 /* Need BKL in order to use the temporary link inside i915_dependency */
976 lockdep_assert_held(&request->i915->drm.struct_mutex);
978 stack.signaler = &request->priotree;
979 list_add(&stack.dfs_link, &dfs);
	/*
	 * Recursively bump all dependent priorities to match the new request.
	 *
	 * A naive approach would be to use recursion:
	 * static void update_priorities(struct i915_priotree *pt, prio) {
	 *	list_for_each_entry(dep, &pt->signalers_list, signal_link)
	 *		update_priorities(dep->signal, prio)
	 *	insert_request(pt);
	 * }
	 * but that may have unlimited recursion depth and so runs a very
	 * real risk of overrunning the kernel stack. Instead, we build
	 * a flat list of all dependencies starting with the current request.
	 * As we walk the list of dependencies, we add all of its dependencies
	 * to the end of the list (this may include an already visited
	 * request) and continue to walk onwards onto the new dependencies. The
	 * end result is a topological list of requests in reverse order, the
	 * last element in the list is the request we must execute first.
	 * (A standalone sketch of this flattening, using hypothetical types,
	 * follows this function.)
	 */
999 list_for_each_entry(dep, &dfs, dfs_link) {
1000 struct i915_priotree *pt = dep->signaler;
1003 * Within an engine, there can be no cycle, but we may
1004 * refer to the same dependency chain multiple times
1005 * (redundant dependencies are not eliminated) and across
1008 list_for_each_entry(p, &pt->signalers_list, signal_link) {
1009 GEM_BUG_ON(p == dep); /* no cycles! */
1011 if (i915_priotree_signaled(p->signaler))
1014 GEM_BUG_ON(p->signaler->priority < pt->priority);
1015 if (prio > READ_ONCE(p->signaler->priority))
1016 list_move_tail(&p->dfs_link, &dfs);
1021 * If we didn't need to bump any existing priorities, and we haven't
1022 * yet submitted this request (i.e. there is no potential race with
1023 * execlists_submit_request()), we can set our own priority and skip
1024 * acquiring the engine locks.
1026 if (request->priotree.priority == I915_PRIORITY_INVALID) {
1027 GEM_BUG_ON(!list_empty(&request->priotree.link));
1028 request->priotree.priority = prio;
1029 if (stack.dfs_link.next == stack.dfs_link.prev)
1031 __list_del_entry(&stack.dfs_link);
1034 engine = request->engine;
1035 spin_lock_irq(&engine->timeline->lock);
1037 /* Fifo and depth-first replacement ensure our deps execute before us */
1038 list_for_each_entry_safe_reverse(dep, p, &dfs, dfs_link) {
1039 struct i915_priotree *pt = dep->signaler;
1041 INIT_LIST_HEAD(&dep->dfs_link);
1043 engine = pt_lock_engine(pt, engine);
1045 if (prio <= pt->priority)
1048 pt->priority = prio;
1049 if (!list_empty(&pt->link)) {
1050 __list_del_entry(&pt->link);
1051 insert_request(engine, pt, prio);
	spin_unlock_irq(&engine->timeline->lock);
}
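/*
 * Editor's illustrative sketch (not driver code): the recursion-free priority
 * bump described in the comment inside execlists_schedule(), reduced to plain
 * C. The node type and explicit work queue are hypothetical stand-ins for the
 * driver's i915_priotree/i915_dependency lists, and the two-pass structure of
 * the real code (build the flat list, then apply under the engine lock) is
 * collapsed into one loop for brevity.
 *
 *	struct node { int prio; int nr_deps; struct node **deps; };
 *
 *	// Raise 'root' and every transitive dependency to at least 'prio'
 *	// using an explicit queue instead of recursion.
 *	static void bump_priorities(struct node *root, int prio,
 *				    struct node **queue, int capacity)
 *	{
 *		int head = 0, tail = 0;
 *
 *		queue[tail++] = root;
 *		while (head < tail) {
 *			struct node *n = queue[head++];
 *			int i;
 *
 *			if (n->prio >= prio)
 *				continue;	// already visited or high enough
 *			n->prio = prio;
 *			for (i = 0; i < n->nr_deps && tail < capacity; i++)
 *				queue[tail++] = n->deps[i];
 *		}
 *	}
 */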
1058 static int __context_pin(struct i915_gem_context *ctx, struct i915_vma *vma)
1064 * Clear this page out of any CPU caches for coherent swap-in/out.
1065 * We only want to do this on the first bind so that we do not stall
1066 * on an active context (which by nature is already on the GPU).
1068 if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
1069 err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
1074 flags = PIN_GLOBAL | PIN_HIGH;
1075 if (ctx->ggtt_offset_bias)
1076 flags |= PIN_OFFSET_BIAS | ctx->ggtt_offset_bias;
1078 return i915_vma_pin(vma, 0, GEN8_LR_CONTEXT_ALIGN, flags);
1081 static struct intel_ring *
1082 execlists_context_pin(struct intel_engine_cs *engine,
1083 struct i915_gem_context *ctx)
1085 struct intel_context *ce = &ctx->engine[engine->id];
1089 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
1091 if (likely(ce->pin_count++))
1093 GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
1095 ret = execlists_context_deferred_alloc(ctx, engine);
1098 GEM_BUG_ON(!ce->state);
1100 ret = __context_pin(ctx, ce->state);
1104 vaddr = i915_gem_object_pin_map(ce->state->obj, I915_MAP_WB);
1105 if (IS_ERR(vaddr)) {
1106 ret = PTR_ERR(vaddr);
1110 ret = intel_ring_pin(ce->ring, ctx->i915, ctx->ggtt_offset_bias);
1114 intel_lr_context_descriptor_update(ctx, engine);
1116 ce->lrc_reg_state = vaddr + LRC_STATE_PN * PAGE_SIZE;
1117 ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
1118 i915_ggtt_offset(ce->ring->vma);
1120 ce->state->obj->pin_global++;
1121 i915_gem_context_get(ctx);
1126 i915_gem_object_unpin_map(ce->state->obj);
1128 __i915_vma_unpin(ce->state);
1131 return ERR_PTR(ret);
1134 static void execlists_context_unpin(struct intel_engine_cs *engine,
1135 struct i915_gem_context *ctx)
1137 struct intel_context *ce = &ctx->engine[engine->id];
1139 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
1140 GEM_BUG_ON(ce->pin_count == 0);
1142 if (--ce->pin_count)
1145 intel_ring_unpin(ce->ring);
1147 ce->state->obj->pin_global--;
1148 i915_gem_object_unpin_map(ce->state->obj);
1149 i915_vma_unpin(ce->state);
1151 i915_gem_context_put(ctx);
1154 static int execlists_request_alloc(struct drm_i915_gem_request *request)
1156 struct intel_engine_cs *engine = request->engine;
1157 struct intel_context *ce = &request->ctx->engine[engine->id];
1160 GEM_BUG_ON(!ce->pin_count);
1162 /* Flush enough space to reduce the likelihood of waiting after
1163 * we start building the request - in which case we will just
1164 * have to repeat work.
1166 request->reserved_space += EXECLISTS_REQUEST_SIZE;
1168 ret = intel_ring_wait_for_space(request->ring, request->reserved_space);
1172 /* Note that after this point, we have committed to using
1173 * this request as it is being used to both track the
1174 * state of engine initialisation and liveness of the
1175 * golden renderstate above. Think twice before you try
1176 * to cancel/unwind this request now.
1179 request->reserved_space -= EXECLISTS_REQUEST_SIZE;
/*
 * In this WA we need to set GEN8_L3SQCREG4[21:21] and reset it after
 * PIPE_CONTROL instruction. This is required for the flush to happen correctly
 * but there is a slight complication as this is applied in WA batch where the
 * values are only initialized once so we cannot take register value at the
 * beginning and reuse it further; hence we save its value to memory, upload a
 * constant value with bit21 set and then we restore it back with the saved value.
 * To simplify the WA, a constant value is formed by using the default value
 * of this register. This shouldn't be a problem because we are only modifying
 * it for a short period and this batch is non-preemptible. We can of course
 * use additional instructions that read the actual value of the register
 * at that time and set our bit of interest but it makes the WA complicated.
 *
 * This WA is also required for Gen9 so extracting as a function avoids
 * code duplication.
 */
static u32 *
gen8_emit_flush_coherentl3_wa(struct intel_engine_cs *engine, u32 *batch)
{
1202 *batch++ = MI_STORE_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
1203 *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
1204 *batch++ = i915_ggtt_offset(engine->scratch) + 256;
1207 *batch++ = MI_LOAD_REGISTER_IMM(1);
1208 *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
1209 *batch++ = 0x40400000 | GEN8_LQSC_FLUSH_COHERENT_LINES;
1211 batch = gen8_emit_pipe_control(batch,
1212 PIPE_CONTROL_CS_STALL |
1213 PIPE_CONTROL_DC_FLUSH_ENABLE,
1216 *batch++ = MI_LOAD_REGISTER_MEM_GEN8 | MI_SRM_LRM_GLOBAL_GTT;
1217 *batch++ = i915_mmio_reg_offset(GEN8_L3SQCREG4);
1218 *batch++ = i915_ggtt_offset(engine->scratch) + 256;
/*
 * Typically we only have one indirect_ctx and per_ctx batch buffer which are
 * initialized at the beginning and shared across all contexts but this field
 * helps us to have multiple batches at different offsets and select them based
 * on a criteria. At the moment this batch always starts at the beginning of the
 * page and at this point we don't have multiple wa_ctx batch buffers.
 *
 * The number of WAs applied is not known at the beginning; we use this field
 * to return the number of DWORDs written.
 *
 * It is to be noted that this batch does not contain MI_BATCH_BUFFER_END
 * so it adds NOOPs as padding to make it cacheline aligned.
 * MI_BATCH_BUFFER_END will be added to the per_ctx batch and both of them
 * together make a complete batch buffer.
 */
1239 static u32 *gen8_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
1241 /* WaDisableCtxRestoreArbitration:bdw,chv */
1242 *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
1244 /* WaFlushCoherentL3CacheLinesAtContextSwitch:bdw */
1245 if (IS_BROADWELL(engine->i915))
1246 batch = gen8_emit_flush_coherentl3_wa(engine, batch);
1248 /* WaClearSlmSpaceAtContextSwitch:bdw,chv */
1249 /* Actual scratch location is at 128 bytes offset */
1250 batch = gen8_emit_pipe_control(batch,
1251 PIPE_CONTROL_FLUSH_L3 |
1252 PIPE_CONTROL_GLOBAL_GTT_IVB |
1253 PIPE_CONTROL_CS_STALL |
1254 PIPE_CONTROL_QW_WRITE,
1255 i915_ggtt_offset(engine->scratch) +
1256 2 * CACHELINE_BYTES);
1258 *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
1260 /* Pad to end of cacheline */
1261 while ((unsigned long)batch % CACHELINE_BYTES)
1265 * MI_BATCH_BUFFER_END is not required in Indirect ctx BB because
1266 * execution depends on the length specified in terms of cache lines
1267 * in the register CTX_RCS_INDIRECT_CTX
1273 static u32 *gen9_init_indirectctx_bb(struct intel_engine_cs *engine, u32 *batch)
1275 *batch++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
1277 /* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
1278 batch = gen8_emit_flush_coherentl3_wa(engine, batch);
1280 /* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
1281 *batch++ = MI_LOAD_REGISTER_IMM(1);
1282 *batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2);
1283 *batch++ = _MASKED_BIT_DISABLE(
1284 GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE);
1287 /* WaClearSlmSpaceAtContextSwitch:kbl */
1288 /* Actual scratch location is at 128 bytes offset */
1289 if (IS_KBL_REVID(engine->i915, 0, KBL_REVID_A0)) {
1290 batch = gen8_emit_pipe_control(batch,
1291 PIPE_CONTROL_FLUSH_L3 |
1292 PIPE_CONTROL_GLOBAL_GTT_IVB |
1293 PIPE_CONTROL_CS_STALL |
1294 PIPE_CONTROL_QW_WRITE,
1295 i915_ggtt_offset(engine->scratch)
1296 + 2 * CACHELINE_BYTES);
1299 /* WaMediaPoolStateCmdInWABB:bxt,glk */
1300 if (HAS_POOLED_EU(engine->i915)) {
		/*
		 * EU pool configuration is set up along with golden context
1303 * during context initialization. This value depends on
1304 * device type (2x6 or 3x6) and needs to be updated based
1305 * on which subslice is disabled especially for 2x6
1306 * devices, however it is safe to load default
1307 * configuration of 3x6 device instead of masking off
1308 * corresponding bits because HW ignores bits of a disabled
1309 * subslice and drops down to appropriate config. Please
1310 * see render_state_setup() in i915_gem_render_state.c for
1311 * possible configurations, to avoid duplication they are
1312 * not shown here again.
1314 *batch++ = GEN9_MEDIA_POOL_STATE;
1315 *batch++ = GEN9_MEDIA_POOL_ENABLE;
1316 *batch++ = 0x00777000;
1322 *batch++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
1324 /* Pad to end of cacheline */
1325 while ((unsigned long)batch % CACHELINE_BYTES)
1331 #define CTX_WA_BB_OBJ_SIZE (PAGE_SIZE)
1333 static int lrc_setup_wa_ctx(struct intel_engine_cs *engine)
1335 struct drm_i915_gem_object *obj;
1336 struct i915_vma *vma;
1339 obj = i915_gem_object_create(engine->i915, CTX_WA_BB_OBJ_SIZE);
1341 return PTR_ERR(obj);
1343 vma = i915_vma_instance(obj, &engine->i915->ggtt.base, NULL);
1349 err = i915_vma_pin(vma, 0, PAGE_SIZE, PIN_GLOBAL | PIN_HIGH);
1353 engine->wa_ctx.vma = vma;
1357 i915_gem_object_put(obj);
1361 static void lrc_destroy_wa_ctx(struct intel_engine_cs *engine)
1363 i915_vma_unpin_and_release(&engine->wa_ctx.vma);
1366 typedef u32 *(*wa_bb_func_t)(struct intel_engine_cs *engine, u32 *batch);
1368 static int intel_init_workaround_bb(struct intel_engine_cs *engine)
1370 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
1371 struct i915_wa_ctx_bb *wa_bb[2] = { &wa_ctx->indirect_ctx,
1373 wa_bb_func_t wa_bb_fn[2];
1375 void *batch, *batch_ptr;
1379 if (GEM_WARN_ON(engine->id != RCS))
1382 switch (INTEL_GEN(engine->i915)) {
1386 wa_bb_fn[0] = gen9_init_indirectctx_bb;
1390 wa_bb_fn[0] = gen8_init_indirectctx_bb;
1394 MISSING_CASE(INTEL_GEN(engine->i915));
1398 ret = lrc_setup_wa_ctx(engine);
1400 DRM_DEBUG_DRIVER("Failed to setup context WA page: %d\n", ret);
1404 page = i915_gem_object_get_dirty_page(wa_ctx->vma->obj, 0);
1405 batch = batch_ptr = kmap_atomic(page);
1408 * Emit the two workaround batch buffers, recording the offset from the
1409 * start of the workaround batch buffer object for each and their
1412 for (i = 0; i < ARRAY_SIZE(wa_bb_fn); i++) {
1413 wa_bb[i]->offset = batch_ptr - batch;
1414 if (GEM_WARN_ON(!IS_ALIGNED(wa_bb[i]->offset,
1415 CACHELINE_BYTES))) {
1420 batch_ptr = wa_bb_fn[i](engine, batch_ptr);
1421 wa_bb[i]->size = batch_ptr - (batch + wa_bb[i]->offset);
1424 BUG_ON(batch_ptr - batch > CTX_WA_BB_OBJ_SIZE);
1426 kunmap_atomic(batch);
1428 lrc_destroy_wa_ctx(engine);
1433 static u8 gtiir[] = {
1441 static void enable_execlists(struct intel_engine_cs *engine)
1443 struct drm_i915_private *dev_priv = engine->i915;
1445 I915_WRITE(RING_HWSTAM(engine->mmio_base), 0xffffffff);
	/*
	 * Make sure we're not enabling the new 12-deep CSB
	 * FIFO as that requires a slightly updated handling
	 * in the ctx switch irq. Since we're currently only
	 * using 2 elements of the enhanced execlists the
	 * deeper FIFO is not needed and it's not worth adding
	 * more statements to the irq handler to support it.
	 */
1455 if (INTEL_GEN(dev_priv) >= 11)
1456 I915_WRITE(RING_MODE_GEN7(engine),
1457 _MASKED_BIT_DISABLE(GEN11_GFX_DISABLE_LEGACY_MODE));
1459 I915_WRITE(RING_MODE_GEN7(engine),
1460 _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
1462 I915_WRITE(RING_HWS_PGA(engine->mmio_base),
1463 engine->status_page.ggtt_offset);
1464 POSTING_READ(RING_HWS_PGA(engine->mmio_base));
1467 static int gen8_init_common_ring(struct intel_engine_cs *engine)
1469 struct intel_engine_execlists * const execlists = &engine->execlists;
1472 ret = intel_mocs_init_engine(engine);
1476 intel_engine_reset_breadcrumbs(engine);
1477 intel_engine_init_hangcheck(engine);
1479 enable_execlists(engine);
1481 GEM_BUG_ON(engine->id >= ARRAY_SIZE(gtiir));
1483 execlists->csb_head = -1;
1484 execlists->active = 0;
1486 /* After a GPU reset, we may have requests to replay */
1487 if (execlists->first)
1488 tasklet_schedule(&execlists->tasklet);
1493 static int gen8_init_render_ring(struct intel_engine_cs *engine)
1495 struct drm_i915_private *dev_priv = engine->i915;
1498 ret = gen8_init_common_ring(engine);
1502 /* We need to disable the AsyncFlip performance optimisations in order
1503 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
1504 * programmed to '1' on all products.
1506 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw,chv
1508 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
1510 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
1512 return init_workarounds_ring(engine);
1515 static int gen9_init_render_ring(struct intel_engine_cs *engine)
1519 ret = gen8_init_common_ring(engine);
1523 return init_workarounds_ring(engine);
1526 static void reset_irq(struct intel_engine_cs *engine)
1528 struct drm_i915_private *dev_priv = engine->i915;
1532 * Clear any pending interrupt state.
1534 * We do it twice out of paranoia that some of the IIR are double
1535 * buffered, and if we only reset it once there may still be
1536 * an interrupt pending.
1538 for (i = 0; i < 2; i++) {
1539 I915_WRITE(GEN8_GT_IIR(gtiir[engine->id]),
1540 GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift);
1541 POSTING_READ(GEN8_GT_IIR(gtiir[engine->id]));
1543 GEM_BUG_ON(I915_READ(GEN8_GT_IIR(gtiir[engine->id])) &
1544 (GT_CONTEXT_SWITCH_INTERRUPT << engine->irq_shift));
1546 clear_bit(ENGINE_IRQ_EXECLIST, &engine->irq_posted);
1549 static void reset_common_ring(struct intel_engine_cs *engine,
1550 struct drm_i915_gem_request *request)
1552 struct intel_engine_execlists * const execlists = &engine->execlists;
1553 struct intel_context *ce;
1554 unsigned long flags;
1556 GEM_TRACE("%s seqno=%x\n",
1557 engine->name, request ? request->global_seqno : 0);
1561 spin_lock_irqsave(&engine->timeline->lock, flags);
1564 * Catch up with any missed context-switch interrupts.
1566 * Ideally we would just read the remaining CSB entries now that we
1567 * know the gpu is idle. However, the CSB registers are sometimes^W
1568 * often trashed across a GPU reset! Instead we have to rely on
1569 * guessing the missed context-switch events by looking at what
1570 * requests were completed.
1572 execlists_cancel_port_requests(execlists);
1574 /* Push back any incomplete requests for replay after the reset. */
1575 __unwind_incomplete_requests(engine);
1577 spin_unlock_irqrestore(&engine->timeline->lock, flags);
1579 /* If the request was innocent, we leave the request in the ELSP
1580 * and will try to replay it on restarting. The context image may
1581 * have been corrupted by the reset, in which case we may have
1582 * to service a new GPU hang, but more likely we can continue on
1585 * If the request was guilty, we presume the context is corrupt
1586 * and have to at least restore the RING register in the context
1587 * image back to the expected values to skip over the guilty request.
1589 if (!request || request->fence.error != -EIO)
1592 /* We want a simple context + ring to execute the breadcrumb update.
1593 * We cannot rely on the context being intact across the GPU hang,
1594 * so clear it and rebuild just what we need for the breadcrumb.
1595 * All pending requests for this context will be zapped, and any
1596 * future request will be after userspace has had the opportunity
1597 * to recreate its own state.
1599 ce = &request->ctx->engine[engine->id];
1600 execlists_init_reg_state(ce->lrc_reg_state,
1601 request->ctx, engine, ce->ring);
1603 /* Move the RING_HEAD onto the breadcrumb, past the hanging batch */
1604 ce->lrc_reg_state[CTX_RING_BUFFER_START+1] =
1605 i915_ggtt_offset(ce->ring->vma);
1606 ce->lrc_reg_state[CTX_RING_HEAD+1] = request->postfix;
1608 request->ring->head = request->postfix;
1609 intel_ring_update_space(request->ring);
1611 /* Reset WaIdleLiteRestore:bdw,skl as well */
1612 unwind_wa_tail(request);
1615 static int intel_logical_ring_emit_pdps(struct drm_i915_gem_request *req)
1617 struct i915_hw_ppgtt *ppgtt = req->ctx->ppgtt;
1618 struct intel_engine_cs *engine = req->engine;
1619 const int num_lri_cmds = GEN8_3LVL_PDPES * 2;
1623 cs = intel_ring_begin(req, num_lri_cmds * 2 + 2);
1627 *cs++ = MI_LOAD_REGISTER_IMM(num_lri_cmds);
1628 for (i = GEN8_3LVL_PDPES - 1; i >= 0; i--) {
1629 const dma_addr_t pd_daddr = i915_page_dir_dma_addr(ppgtt, i);
1631 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_UDW(engine, i));
1632 *cs++ = upper_32_bits(pd_daddr);
1633 *cs++ = i915_mmio_reg_offset(GEN8_RING_PDP_LDW(engine, i));
1634 *cs++ = lower_32_bits(pd_daddr);
1638 intel_ring_advance(req, cs);
1643 static int gen8_emit_bb_start(struct drm_i915_gem_request *req,
1644 u64 offset, u32 len,
1645 const unsigned int flags)
	/* Don't rely on hw updating PDPs, especially in lite-restore.
	 * Ideally, we should set Force PD Restore in ctx descriptor,
	 * but we can't. Force Restore would be a second option, but
	 * it is unsafe in case of lite-restore (because the ctx is
	 * not idle). PML4 is allocated during ppgtt init so this is
	 * not needed in 48-bit.
	 */
1656 if (req->ctx->ppgtt &&
1657 (intel_engine_flag(req->engine) & req->ctx->ppgtt->pd_dirty_rings) &&
1658 !i915_vm_is_48bit(&req->ctx->ppgtt->base) &&
1659 !intel_vgpu_active(req->i915)) {
1660 ret = intel_logical_ring_emit_pdps(req);
1664 req->ctx->ppgtt->pd_dirty_rings &= ~intel_engine_flag(req->engine);
1667 cs = intel_ring_begin(req, 4);
	/*
	 * WaDisableCtxRestoreArbitration:bdw,chv
	 *
	 * We don't need to perform MI_ARB_ENABLE as often as we do (in
	 * particular all the gen that do not need the w/a at all!), if we
	 * took care to make sure that on every switch into this context
	 * (both ordinary and for preemption) that arbitration was enabled
	 * we would be fine. However, there doesn't seem to be a downside to
	 * being paranoid and making sure it is set before each batch and
	 * every context-switch.
	 *
	 * Note that if we fail to enable arbitration before the request
	 * is complete, then we do not see the context-switch interrupt and
	 * the engine hangs (with RING_HEAD == RING_TAIL).
	 *
	 * That satisfies both the GPGPU w/a and our heavy-handed paranoia.
	 */
1688 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
1690 /* FIXME(BDW): Address space and security selectors. */
1691 *cs++ = MI_BATCH_BUFFER_START_GEN8 |
1692 (flags & I915_DISPATCH_SECURE ? 0 : BIT(8)) |
1693 (flags & I915_DISPATCH_RS ? MI_BATCH_RESOURCE_STREAMER : 0);
1694 *cs++ = lower_32_bits(offset);
1695 *cs++ = upper_32_bits(offset);
1696 intel_ring_advance(req, cs);
1701 static void gen8_logical_ring_enable_irq(struct intel_engine_cs *engine)
1703 struct drm_i915_private *dev_priv = engine->i915;
1704 I915_WRITE_IMR(engine,
1705 ~(engine->irq_enable_mask | engine->irq_keep_mask));
1706 POSTING_READ_FW(RING_IMR(engine->mmio_base));
1709 static void gen8_logical_ring_disable_irq(struct intel_engine_cs *engine)
1711 struct drm_i915_private *dev_priv = engine->i915;
1712 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
1715 static int gen8_emit_flush(struct drm_i915_gem_request *request, u32 mode)
1719 cs = intel_ring_begin(request, 4);
1723 cmd = MI_FLUSH_DW + 1;
1725 /* We always require a command barrier so that subsequent
1726 * commands, such as breadcrumb interrupts, are strictly ordered
1727 * wrt the contents of the write cache being flushed to memory
1728 * (and thus being coherent from the CPU).
1730 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
1732 if (mode & EMIT_INVALIDATE) {
1733 cmd |= MI_INVALIDATE_TLB;
1734 if (request->engine->id == VCS)
1735 cmd |= MI_INVALIDATE_BSD;
1739 *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
1740 *cs++ = 0; /* upper addr */
1741 *cs++ = 0; /* value */
1742 intel_ring_advance(request, cs);
1747 static int gen8_emit_flush_render(struct drm_i915_gem_request *request,
1750 struct intel_engine_cs *engine = request->engine;
1752 i915_ggtt_offset(engine->scratch) + 2 * CACHELINE_BYTES;
1753 bool vf_flush_wa = false, dc_flush_wa = false;
1757 flags |= PIPE_CONTROL_CS_STALL;
1759 if (mode & EMIT_FLUSH) {
1760 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
1761 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
1762 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
1763 flags |= PIPE_CONTROL_FLUSH_ENABLE;
1766 if (mode & EMIT_INVALIDATE) {
1767 flags |= PIPE_CONTROL_TLB_INVALIDATE;
1768 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
1769 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
1770 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
1771 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
1772 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
1773 flags |= PIPE_CONTROL_QW_WRITE;
1774 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
1777 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
1780 if (IS_GEN9(request->i915))
1783 /* WaForGAMHang:kbl */
1784 if (IS_KBL_REVID(request->i915, 0, KBL_REVID_B0))
1796 cs = intel_ring_begin(request, len);
1801 cs = gen8_emit_pipe_control(cs, 0, 0);
1804 cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,
1807 cs = gen8_emit_pipe_control(cs, flags, scratch_addr);
1810 cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);
1812 intel_ring_advance(request, cs);
1818 * Reserve space for 2 NOOPs at the end of each request to be
1819 * used as a workaround for not being allowed to do lite
1820 * restore with HEAD==TAIL (WaIdleLiteRestore).
1822 static void gen8_emit_wa_tail(struct drm_i915_gem_request *request, u32 *cs)
1824 /* Ensure there's always at least one preemption point per-request. */
1825 *cs++ = MI_ARB_CHECK;
1827 request->wa_tail = intel_ring_offset(request, cs);
1830 static void gen8_emit_breadcrumb(struct drm_i915_gem_request *request, u32 *cs)
1832 /* w/a: bit 5 needs to be zero for MI_FLUSH_DW address. */
1833 BUILD_BUG_ON(I915_GEM_HWS_INDEX_ADDR & (1 << 5));
1835 cs = gen8_emit_ggtt_write(cs, request->global_seqno,
1836 intel_hws_seqno_address(request->engine));
1837 *cs++ = MI_USER_INTERRUPT;
1839 request->tail = intel_ring_offset(request, cs);
1840 assert_ring_tail_valid(request->ring, request->tail);
1842 gen8_emit_wa_tail(request, cs);
1844 static const int gen8_emit_breadcrumb_sz = 6 + WA_TAIL_DWORDS;
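/*
 * Editor's note (an assumption based on the emitters above, not stated in the
 * source): the "6" in gen8_emit_breadcrumb_sz counts the seqno write emitted
 * by gen8_emit_ggtt_write() (4 dwords) plus MI_USER_INTERRUPT and a padding
 * MI_NOOP, while WA_TAIL_DWORDS reserves room for the WaIdleLiteRestore tail
 * emitted by gen8_emit_wa_tail(). The "8" used for the render variant below
 * similarly reflects the larger PIPE_CONTROL-based seqno write of
 * gen8_emit_ggtt_write_rcs().
 */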
1846 static void gen8_emit_breadcrumb_rcs(struct drm_i915_gem_request *request,
1849 /* We're using qword write, seqno should be aligned to 8 bytes. */
1850 BUILD_BUG_ON(I915_GEM_HWS_INDEX & 1);
1852 cs = gen8_emit_ggtt_write_rcs(cs, request->global_seqno,
1853 intel_hws_seqno_address(request->engine));
1854 *cs++ = MI_USER_INTERRUPT;
1856 request->tail = intel_ring_offset(request, cs);
1857 assert_ring_tail_valid(request->ring, request->tail);
1859 gen8_emit_wa_tail(request, cs);
1861 static const int gen8_emit_breadcrumb_rcs_sz = 8 + WA_TAIL_DWORDS;
1863 static int gen8_init_rcs_context(struct drm_i915_gem_request *req)
1867 ret = intel_ring_workarounds_emit(req);
	ret = intel_rcs_context_init_mocs(req);
	/*
	 * Failing to program the MOCS is non-fatal. The system will not
	 * run at peak performance. So generate an error and carry on.
	 */
	if (ret)
		DRM_ERROR("MOCS failed to program: expect performance issues.\n");
1879 return i915_gem_render_state_emit(req);
1883 * intel_logical_ring_cleanup() - deallocate the Engine Command Streamer
1884 * @engine: Engine Command Streamer.
1886 void intel_logical_ring_cleanup(struct intel_engine_cs *engine)
1888 struct drm_i915_private *dev_priv;
1891 * Tasklet cannot be active at this point due to intel_mark_active/idle
1892 * so this is just for documentation.
1894 if (WARN_ON(test_bit(TASKLET_STATE_SCHED,
1895 &engine->execlists.tasklet.state)))
1896 tasklet_kill(&engine->execlists.tasklet);
1898 dev_priv = engine->i915;
1900 if (engine->buffer) {
1901 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
1904 if (engine->cleanup)
1905 engine->cleanup(engine);
1907 intel_engine_cleanup_common(engine);
1909 lrc_destroy_wa_ctx(engine);
1911 engine->i915 = NULL;
1912 dev_priv->engine[engine->id] = NULL;
1916 static void execlists_set_default_submission(struct intel_engine_cs *engine)
1918 engine->submit_request = execlists_submit_request;
1919 engine->cancel_requests = execlists_cancel_requests;
1920 engine->schedule = execlists_schedule;
1921 engine->execlists.tasklet.func = execlists_submission_tasklet;
1923 engine->park = NULL;
1924 engine->unpark = NULL;
1926 engine->flags |= I915_ENGINE_SUPPORTS_STATS;
1930 logical_ring_default_vfuncs(struct intel_engine_cs *engine)
1932 /* Default vfuncs which can be overridden by each engine. */
1933 engine->init_hw = gen8_init_common_ring;
1934 engine->reset_hw = reset_common_ring;
1936 engine->context_pin = execlists_context_pin;
1937 engine->context_unpin = execlists_context_unpin;
1939 engine->request_alloc = execlists_request_alloc;
1941 engine->emit_flush = gen8_emit_flush;
1942 engine->emit_breadcrumb = gen8_emit_breadcrumb;
1943 engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_sz;
1945 engine->set_default_submission = execlists_set_default_submission;
1947 engine->irq_enable = gen8_logical_ring_enable_irq;
1948 engine->irq_disable = gen8_logical_ring_disable_irq;
1949 engine->emit_bb_start = gen8_emit_bb_start;
1953 logical_ring_default_irqs(struct intel_engine_cs *engine)
1955 unsigned shift = engine->irq_shift;
1956 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT << shift;
1957 engine->irq_keep_mask = GT_CONTEXT_SWITCH_INTERRUPT << shift;
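/*
 * A small worked example of the masks above, assuming a hypothetical
 * engine with irq_shift == 16: irq_enable_mask becomes
 * GT_RENDER_USER_INTERRUPT << 16 and irq_keep_mask becomes
 * GT_CONTEXT_SWITCH_INTERRUPT << 16, i.e. both bits land in that
 * engine's field of the shared GT interrupt registers.  The actual
 * per-engine shift values are assigned during engine setup elsewhere.
 */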
1961 logical_ring_setup(struct intel_engine_cs *engine)
1963 struct drm_i915_private *dev_priv = engine->i915;
1964 enum forcewake_domains fw_domains;
1966 intel_engine_setup_common(engine);
1968 /* Intentionally left blank. */
1969 engine->buffer = NULL;
1971 fw_domains = intel_uncore_forcewake_for_reg(dev_priv,
1975 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
1976 RING_CONTEXT_STATUS_PTR(engine),
1977 FW_REG_READ | FW_REG_WRITE);
1979 fw_domains |= intel_uncore_forcewake_for_reg(dev_priv,
1980 RING_CONTEXT_STATUS_BUF_BASE(engine),
1983 engine->execlists.fw_domains = fw_domains;
1985 tasklet_init(&engine->execlists.tasklet,
1986 execlists_submission_tasklet, (unsigned long)engine);
1988 logical_ring_default_vfuncs(engine);
1989 logical_ring_default_irqs(engine);
1992 static int logical_ring_init(struct intel_engine_cs *engine)
1996 ret = intel_engine_init_common(engine);
2000 engine->execlists.elsp =
2001 engine->i915->regs + i915_mmio_reg_offset(RING_ELSP(engine));
2006 intel_logical_ring_cleanup(engine);
2010 int logical_render_ring_init(struct intel_engine_cs *engine)
2012 struct drm_i915_private *dev_priv = engine->i915;
2015 logical_ring_setup(engine);
2017 if (HAS_L3_DPF(dev_priv))
2018 engine->irq_keep_mask |= GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2020 /* Override some for render ring. */
2021 if (INTEL_GEN(dev_priv) >= 9)
2022 engine->init_hw = gen9_init_render_ring;
2024 engine->init_hw = gen8_init_render_ring;
2025 engine->init_context = gen8_init_rcs_context;
2026 engine->emit_flush = gen8_emit_flush_render;
2027 engine->emit_breadcrumb = gen8_emit_breadcrumb_rcs;
2028 engine->emit_breadcrumb_sz = gen8_emit_breadcrumb_rcs_sz;
2030 ret = intel_engine_create_scratch(engine, PAGE_SIZE);
2034 ret = intel_init_workaround_bb(engine);
2037 * We continue even if we fail to initialize the WA batch,
2038 * because we only expect rare glitches and nothing critical
2039 * enough to prevent us from using the GPU.
2041 DRM_ERROR("WA batch buffer initialization failed: %d\n",
2045 return logical_ring_init(engine);
2048 int logical_xcs_ring_init(struct intel_engine_cs *engine)
2050 logical_ring_setup(engine);
2052 return logical_ring_init(engine);
2056 make_rpcs(struct drm_i915_private *dev_priv)
2061 * No explicit RPCS request is needed to ensure full
2062 * slice/subslice/EU enablement prior to Gen9.
2064 if (INTEL_GEN(dev_priv) < 9)
2068 * Starting in Gen9, render power gating can leave
2069 * slice/subslice/EU in a partially enabled state. We
2070 * must make an explicit request through RPCS for full
2073 if (INTEL_INFO(dev_priv)->sseu.has_slice_pg) {
2074 rpcs |= GEN8_RPCS_S_CNT_ENABLE;
2075 rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.slice_mask) <<
2076 GEN8_RPCS_S_CNT_SHIFT;
2077 rpcs |= GEN8_RPCS_ENABLE;
2080 if (INTEL_INFO(dev_priv)->sseu.has_subslice_pg) {
2081 rpcs |= GEN8_RPCS_SS_CNT_ENABLE;
2082 rpcs |= hweight8(INTEL_INFO(dev_priv)->sseu.subslice_mask) <<
2083 GEN8_RPCS_SS_CNT_SHIFT;
2084 rpcs |= GEN8_RPCS_ENABLE;
2087 if (INTEL_INFO(dev_priv)->sseu.has_eu_pg) {
2088 rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
2089 GEN8_RPCS_EU_MIN_SHIFT;
2090 rpcs |= INTEL_INFO(dev_priv)->sseu.eu_per_subslice <<
2091 GEN8_RPCS_EU_MAX_SHIFT;
2092 rpcs |= GEN8_RPCS_ENABLE;
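/*
 * A worked example of the RPCS encoding above, using purely hypothetical
 * SSEU values (one slice, three subslices, eight EUs per subslice, with
 * all three power-gating capabilities present):
 *
 *   rpcs = GEN8_RPCS_ENABLE
 *        | GEN8_RPCS_S_CNT_ENABLE  | (1 << GEN8_RPCS_S_CNT_SHIFT)
 *        | GEN8_RPCS_SS_CNT_ENABLE | (3 << GEN8_RPCS_SS_CNT_SHIFT)
 *        | (8 << GEN8_RPCS_EU_MIN_SHIFT)
 *        | (8 << GEN8_RPCS_EU_MAX_SHIFT);
 *
 * i.e. the slice and subslice counts are hweight8() of the enabled masks
 * and the EU min/max fields both request the full per-subslice EU count.
 */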
2098 static u32 intel_lr_indirect_ctx_offset(struct intel_engine_cs *engine)
2100 u32 indirect_ctx_offset;
2102 switch (INTEL_GEN(engine->i915)) {
2104 MISSING_CASE(INTEL_GEN(engine->i915));
2107 indirect_ctx_offset =
2108 GEN10_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
2111 indirect_ctx_offset =
2112 GEN9_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
2115 indirect_ctx_offset =
2116 GEN8_CTX_RCS_INDIRECT_CTX_OFFSET_DEFAULT;
2120 return indirect_ctx_offset;
2123 static void execlists_init_reg_state(u32 *regs,
2124 struct i915_gem_context *ctx,
2125 struct intel_engine_cs *engine,
2126 struct intel_ring *ring)
2128 struct drm_i915_private *dev_priv = engine->i915;
2129 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: dev_priv->mm.aliasing_ppgtt;
2130 u32 base = engine->mmio_base;
2131 bool rcs = engine->id == RCS;
2133 /* A context is actually a big batch buffer with several
2134 * MI_LOAD_REGISTER_IMM commands followed by (reg, value) pairs. The
2135 * values we are setting here are only for the first context restore:
2136 * on a subsequent save, the GPU will recreate this batchbuffer with new
2137 * values (including all the missing MI_LOAD_REGISTER_IMM commands that
2138 * we are not initializing here).
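/*
 * Concretely, each CTX_REG() below stores a register offset and its
 * value in two consecutive dwords of the image, and the CTX_LRI_HEADER_*
 * slots hold the MI_LOAD_REGISTER_IMM commands heading each run of
 * pairs, so the restored stream looks roughly like:
 *
 *   MI_LOAD_REGISTER_IMM(N) | flags
 *     <reg offset> <value>
 *     <reg offset> <value>
 *     ...                        (N pairs)
 *   MI_LOAD_REGISTER_IMM(M) | flags
 *     ...
 */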
2140 regs[CTX_LRI_HEADER_0] = MI_LOAD_REGISTER_IMM(rcs ? 14 : 11) |
2141 MI_LRI_FORCE_POSTED;
2143 CTX_REG(regs, CTX_CONTEXT_CONTROL, RING_CONTEXT_CONTROL(engine),
2144 _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
2145 CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT) |
2146 _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
2147 (HAS_RESOURCE_STREAMER(dev_priv) ?
2148 CTX_CTRL_RS_CTX_ENABLE : 0)));
2149 CTX_REG(regs, CTX_RING_HEAD, RING_HEAD(base), 0);
2150 CTX_REG(regs, CTX_RING_TAIL, RING_TAIL(base), 0);
2151 CTX_REG(regs, CTX_RING_BUFFER_START, RING_START(base), 0);
2152 CTX_REG(regs, CTX_RING_BUFFER_CONTROL, RING_CTL(base),
2153 RING_CTL_SIZE(ring->size) | RING_VALID);
2154 CTX_REG(regs, CTX_BB_HEAD_U, RING_BBADDR_UDW(base), 0);
2155 CTX_REG(regs, CTX_BB_HEAD_L, RING_BBADDR(base), 0);
2156 CTX_REG(regs, CTX_BB_STATE, RING_BBSTATE(base), RING_BB_PPGTT);
2157 CTX_REG(regs, CTX_SECOND_BB_HEAD_U, RING_SBBADDR_UDW(base), 0);
2158 CTX_REG(regs, CTX_SECOND_BB_HEAD_L, RING_SBBADDR(base), 0);
2159 CTX_REG(regs, CTX_SECOND_BB_STATE, RING_SBBSTATE(base), 0);
2161 struct i915_ctx_workarounds *wa_ctx = &engine->wa_ctx;
2163 CTX_REG(regs, CTX_RCS_INDIRECT_CTX, RING_INDIRECT_CTX(base), 0);
2164 CTX_REG(regs, CTX_RCS_INDIRECT_CTX_OFFSET,
2165 RING_INDIRECT_CTX_OFFSET(base), 0);
2166 if (wa_ctx->indirect_ctx.size) {
2167 u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
2169 regs[CTX_RCS_INDIRECT_CTX + 1] =
2170 (ggtt_offset + wa_ctx->indirect_ctx.offset) |
2171 (wa_ctx->indirect_ctx.size / CACHELINE_BYTES);
2173 regs[CTX_RCS_INDIRECT_CTX_OFFSET + 1] =
2174 intel_lr_indirect_ctx_offset(engine) << 6;
2177 CTX_REG(regs, CTX_BB_PER_CTX_PTR, RING_BB_PER_CTX_PTR(base), 0);
2178 if (wa_ctx->per_ctx.size) {
2179 u32 ggtt_offset = i915_ggtt_offset(wa_ctx->vma);
2181 regs[CTX_BB_PER_CTX_PTR + 1] =
2182 (ggtt_offset + wa_ctx->per_ctx.offset) | 0x01;
2186 regs[CTX_LRI_HEADER_1] = MI_LOAD_REGISTER_IMM(9) | MI_LRI_FORCE_POSTED;
2188 CTX_REG(regs, CTX_CTX_TIMESTAMP, RING_CTX_TIMESTAMP(base), 0);
2189 /* PDP values will be assigned later if needed */
2190 CTX_REG(regs, CTX_PDP3_UDW, GEN8_RING_PDP_UDW(engine, 3), 0);
2191 CTX_REG(regs, CTX_PDP3_LDW, GEN8_RING_PDP_LDW(engine, 3), 0);
2192 CTX_REG(regs, CTX_PDP2_UDW, GEN8_RING_PDP_UDW(engine, 2), 0);
2193 CTX_REG(regs, CTX_PDP2_LDW, GEN8_RING_PDP_LDW(engine, 2), 0);
2194 CTX_REG(regs, CTX_PDP1_UDW, GEN8_RING_PDP_UDW(engine, 1), 0);
2195 CTX_REG(regs, CTX_PDP1_LDW, GEN8_RING_PDP_LDW(engine, 1), 0);
2196 CTX_REG(regs, CTX_PDP0_UDW, GEN8_RING_PDP_UDW(engine, 0), 0);
2197 CTX_REG(regs, CTX_PDP0_LDW, GEN8_RING_PDP_LDW(engine, 0), 0);
2199 if (ppgtt && i915_vm_is_48bit(&ppgtt->base)) {
2200 /* 64b PPGTT (48bit canonical)
2201 * PDP0_DESCRIPTOR contains the base address of the PML4 and
2202 * other PDP Descriptors are ignored.
2204 ASSIGN_CTX_PML4(ppgtt, regs);
2208 regs[CTX_LRI_HEADER_2] = MI_LOAD_REGISTER_IMM(1);
2209 CTX_REG(regs, CTX_R_PWR_CLK_STATE, GEN8_R_PWR_CLK_STATE,
2210 make_rpcs(dev_priv));
2212 i915_oa_init_reg_state(engine, ctx, regs);
2217 populate_lr_context(struct i915_gem_context *ctx,
2218 struct drm_i915_gem_object *ctx_obj,
2219 struct intel_engine_cs *engine,
2220 struct intel_ring *ring)
2226 ret = i915_gem_object_set_to_cpu_domain(ctx_obj, true);
2228 DRM_DEBUG_DRIVER("Could not set to CPU domain\n");
2232 vaddr = i915_gem_object_pin_map(ctx_obj, I915_MAP_WB);
2233 if (IS_ERR(vaddr)) {
2234 ret = PTR_ERR(vaddr);
2235 DRM_DEBUG_DRIVER("Could not map object pages! (%d)\n", ret);
2238 ctx_obj->mm.dirty = true;
2240 if (engine->default_state) {
2242 * We only want to copy over the template context state,
2243 * skipping over the headers reserved for GuC communication,
2244 * leaving those as zero.
2246 const unsigned long start = LRC_HEADER_PAGES * PAGE_SIZE;
2249 defaults = i915_gem_object_pin_map(engine->default_state,
2251 if (IS_ERR(defaults))
2252 return PTR_ERR(defaults);
2254 memcpy(vaddr + start, defaults + start, engine->context_size);
2255 i915_gem_object_unpin_map(engine->default_state);
2258 /* The second page of the context object contains some fields which must
2259 * be set up prior to the first execution. */
2260 regs = vaddr + LRC_STATE_PN * PAGE_SIZE;
2261 execlists_init_reg_state(regs, ctx, engine, ring);
2262 if (!engine->default_state)
2263 regs[CTX_CONTEXT_CONTROL + 1] |=
2264 _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
2265 if (ctx->hw_id == PREEMPT_ID)
2266 regs[CTX_CONTEXT_CONTROL + 1] |=
2267 _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT |
2268 CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT);
2270 i915_gem_object_unpin_map(ctx_obj);
2275 static int execlists_context_deferred_alloc(struct i915_gem_context *ctx,
2276 struct intel_engine_cs *engine)
2278 struct drm_i915_gem_object *ctx_obj;
2279 struct intel_context *ce = &ctx->engine[engine->id];
2280 struct i915_vma *vma;
2281 uint32_t context_size;
2282 struct intel_ring *ring;
2288 context_size = round_up(engine->context_size, I915_GTT_PAGE_SIZE);
2291 * Before the actual start of the context image, we insert a few pages
2292 * for our own use and for sharing with the GuC.
2294 context_size += LRC_HEADER_PAGES * PAGE_SIZE;
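/*
 * A sketch of the resulting backing object layout, in terms of the LRC_*
 * page constants from the headers:
 *
 *   pages [0, LRC_HEADER_PAGES):  reserved for our own use and for
 *                                 sharing with the GuC, left zeroed
 *   page  LRC_STATE_PN:           register state filled in by
 *                                 execlists_init_reg_state()
 *   remaining pages:              HW-owned context state, sized from
 *                                 engine->context_size
 */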
2296 ctx_obj = i915_gem_object_create(ctx->i915, context_size);
2297 if (IS_ERR(ctx_obj)) {
2298 DRM_DEBUG_DRIVER("Alloc LRC backing obj failed.\n");
2299 return PTR_ERR(ctx_obj);
2302 vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
2305 goto error_deref_obj;
2308 ring = intel_engine_create_ring(engine, ctx->ring_size);
2310 ret = PTR_ERR(ring);
2311 goto error_deref_obj;
2314 ret = populate_lr_context(ctx, ctx_obj, engine, ring);
2316 DRM_DEBUG_DRIVER("Failed to populate LRC: %d\n", ret);
2317 goto error_ring_free;
2326 intel_ring_free(ring);
2328 i915_gem_object_put(ctx_obj);
2332 void intel_lr_context_resume(struct drm_i915_private *dev_priv)
2334 struct intel_engine_cs *engine;
2335 struct i915_gem_context *ctx;
2336 enum intel_engine_id id;
2338 /* Because we emit WA_TAIL_DWORDS there may be a disparity
2339 * between our bookkeeping in ce->ring->head and ce->ring->tail and
2340 * what is stored in the context image. As we only write new commands
2341 * from ce->ring->tail onwards, everything before that is junk. If the GPU
2342 * starts reading from the RING_HEAD stored in the context, it may try to
2343 * execute that junk and die.
2345 * So to avoid that we reset the context images upon resume. For
2346 * simplicity, we just zero everything out.
2348 list_for_each_entry(ctx, &dev_priv->contexts.list, link) {
2349 for_each_engine(engine, dev_priv, id) {
2350 struct intel_context *ce = &ctx->engine[engine->id];
2356 reg = i915_gem_object_pin_map(ce->state->obj,
2358 if (WARN_ON(IS_ERR(reg)))
2361 reg += LRC_STATE_PN * PAGE_SIZE / sizeof(*reg);
2362 reg[CTX_RING_HEAD+1] = 0;
2363 reg[CTX_RING_TAIL+1] = 0;
2365 ce->state->obj->mm.dirty = true;
2366 i915_gem_object_unpin_map(ce->state->obj);
2368 intel_ring_reset(ce->ring, 0);