2 * Copyright © 2008-2010 Intel Corporation
4 * Permission is hereby granted, free of charge, to any person obtaining a
5 * copy of this software and associated documentation files (the "Software"),
6 * to deal in the Software without restriction, including without limitation
7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8 * and/or sell copies of the Software, and to permit persons to whom the
9 * Software is furnished to do so, subject to the following conditions:
11 * The above copyright notice and this permission notice (including the next
12 * paragraph) shall be included in all copies or substantial portions of the
15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
24 * Eric Anholt <eric@anholt.net>
25 * Zou Nan hai <nanhai.zou@intel.com>
26 * Xiang Hai hao <haihao.xiang@intel.com>
30 #include <linux/log2.h>
32 #include <drm/i915_drm.h>
35 #include "i915_gem_render_state.h"
36 #include "i915_trace.h"
37 #include "intel_drv.h"
38 #include "intel_workarounds.h"
40 /* Rough estimate of the typical request size, performing a flush,
41 * set-context and then emitting the batch.
43 #define LEGACY_REQUEST_SIZE 200
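/*
 * This estimate is reserved up front for every request (see
 * ring_request_alloc() adding it to rq->reserved_space below) so that
 * finalising the request, e.g. emitting the breadcrumb, never has to
 * wait for ring space.
 */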
45 static unsigned int __intel_ring_space(unsigned int head,
50 * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
51 * same cacheline, the Head Pointer must not be greater than the Tail Pointer."
54 GEM_BUG_ON(!is_power_of_2(size));
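/*
 * Worked example (assuming 64 byte cachelines and a 4096 byte ring):
 * with head == tail == 0 the ring is empty, yet we report
 * (0 - 0 - 64) & 4095 == 4032 bytes free, i.e. one cacheline is always
 * held back so that head and tail can never share a cacheline.
 */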
55 return (head - tail - CACHELINE_BYTES) & (size - 1);
58 unsigned int intel_ring_update_space(struct intel_ring *ring)
62 space = __intel_ring_space(ring->head, ring->emit, ring->size);
69 gen2_render_ring_flush(struct i915_request *rq, u32 mode)
71 unsigned int num_store_dw;
76 if (mode & EMIT_INVALIDATE)
78 if (mode & EMIT_FLUSH)
81 cs = intel_ring_begin(rq, 2 + 3 * num_store_dw);
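/*
 * Dword budget (2 + 3 per store): the flush command itself, three
 * dwords for each scratch store emitted below, and a trailing MI_FLUSH.
 */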
86 while (num_store_dw--) {
87 *cs++ = MI_STORE_DWORD_IMM | MI_MEM_VIRTUAL;
88 *cs++ = i915_scratch_offset(rq->i915);
91 *cs++ = MI_FLUSH | MI_NO_WRITE_FLUSH;
93 intel_ring_advance(rq, cs);
99 gen4_render_ring_flush(struct i915_request *rq, u32 mode)
107 * I915_GEM_DOMAIN_RENDER is always invalidated, but is
108 * only flushed if MI_NO_WRITE_FLUSH is unset. On 965, it is
109 * also flushed at 2d versus 3d pipeline switches.
113 * I915_GEM_DOMAIN_SAMPLER is flushed on pre-965 if
114 * MI_READ_FLUSH is set, and is always flushed on 965.
116 * I915_GEM_DOMAIN_COMMAND may not exist?
118 * I915_GEM_DOMAIN_INSTRUCTION, which exists on 965, is
119 * invalidated when MI_EXE_FLUSH is set.
121 * I915_GEM_DOMAIN_VERTEX, which exists on 965, is
122 * invalidated with every MI_FLUSH.
126 * On 965, TLBs associated with I915_GEM_DOMAIN_COMMAND
127 * and I915_GEM_DOMAIN_CPU are invalidated at PTE write and
128 * I915_GEM_DOMAIN_RENDER and I915_GEM_DOMAIN_SAMPLER
129 * are flushed at any MI_FLUSH.
133 if (mode & EMIT_INVALIDATE) {
135 if (IS_G4X(rq->i915) || IS_GEN(rq->i915, 5))
136 cmd |= MI_INVALIDATE_ISP;
140 if (mode & EMIT_INVALIDATE)
143 cs = intel_ring_begin(rq, i);
150 * A random delay to let the CS invalidate take effect? Without this
151 * delay, the GPU relocation path fails as the CS does not see
152 * the updated contents. Just as important, if we apply the flushes
153 * to the EMIT_FLUSH branch (i.e. immediately after the relocation
154 * write and before the invalidate on the next batch), the relocations
155 * still fail. This implies that a delay following invalidation is
156 * required to reset the caches, as opposed to a delay to
157 * ensure the memory is written.
159 if (mode & EMIT_INVALIDATE) {
160 *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
161 *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
165 for (i = 0; i < 12; i++)
168 *cs++ = GFX_OP_PIPE_CONTROL(4) | PIPE_CONTROL_QW_WRITE;
169 *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
176 intel_ring_advance(rq, cs);
182 * Emits a PIPE_CONTROL with a non-zero post-sync operation, for
183 * implementing two workarounds on gen6. From section 1.4.7.1
184 * "PIPE_CONTROL" of the Sandy Bridge PRM volume 2 part 1:
186 * [DevSNB-C+{W/A}] Before any depth stall flush (including those
187 * produced by non-pipelined state commands), software needs to first
188 * send a PIPE_CONTROL with no bits set except Post-Sync Operation != 0.
191 * [Dev-SNB{W/A}]: Before a PIPE_CONTROL with Write Cache Flush Enable
192 * =1, a PIPE_CONTROL with any non-zero post-sync-op is required.
194 * And the workaround for these two requires this workaround first:
196 * [Dev-SNB{W/A}]: Pipe-control with CS-stall bit set must be sent
197 * BEFORE the pipe-control with a post-sync op and no write-cache flushes.
200 * And this last workaround is tricky because of the requirements on
201 * that bit. From section 1.4.7.2.3 "Stall" of the Sandy Bridge PRM
204 * "1 of the following must also be set:
205 * - Render Target Cache Flush Enable ([12] of DW1)
206 * - Depth Cache Flush Enable ([0] of DW1)
207 * - Stall at Pixel Scoreboard ([1] of DW1)
208 * - Depth Stall ([13] of DW1)
209 * - Post-Sync Operation ([13] of DW1)
210 * - Notify Enable ([8] of DW1)"
212 * The cache flushes require the workaround flush that triggered this
213 * one, so we can't use it. Depth stall would trigger the same.
214 * Post-sync nonzero is what triggered this second workaround, so we
215 * can't use that one either. Notify enable is IRQs, which aren't
216 * really our business. That leaves only stall at scoreboard.
219 gen6_emit_post_sync_nonzero_flush(struct i915_request *rq)
221 u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
224 cs = intel_ring_begin(rq, 6);
228 *cs++ = GFX_OP_PIPE_CONTROL(5);
229 *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
230 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
231 *cs++ = 0; /* low dword */
232 *cs++ = 0; /* high dword */
234 intel_ring_advance(rq, cs);
236 cs = intel_ring_begin(rq, 6);
240 *cs++ = GFX_OP_PIPE_CONTROL(5);
241 *cs++ = PIPE_CONTROL_QW_WRITE;
242 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
246 intel_ring_advance(rq, cs);
252 gen6_render_ring_flush(struct i915_request *rq, u32 mode)
254 u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
258 /* Force SNB workarounds for PIPE_CONTROL flushes */
259 ret = gen6_emit_post_sync_nonzero_flush(rq);
263 /* Just flush everything. Experiments have shown that reducing the
264 * number of bits based on the write domains has little performance impact.
267 if (mode & EMIT_FLUSH) {
268 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
269 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
271 * Ensure that any following seqno writes only happen
272 * when the render cache is indeed flushed.
274 flags |= PIPE_CONTROL_CS_STALL;
276 if (mode & EMIT_INVALIDATE) {
277 flags |= PIPE_CONTROL_TLB_INVALIDATE;
278 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
279 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
280 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
281 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
282 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
284 * TLB invalidate requires a post-sync write.
286 flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
289 cs = intel_ring_begin(rq, 4);
293 *cs++ = GFX_OP_PIPE_CONTROL(4);
295 *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
297 intel_ring_advance(rq, cs);
302 static u32 *gen6_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
304 /* First we do the gen6_emit_post_sync_nonzero_flush w/a */
305 *cs++ = GFX_OP_PIPE_CONTROL(4);
306 *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
310 *cs++ = GFX_OP_PIPE_CONTROL(4);
311 *cs++ = PIPE_CONTROL_QW_WRITE;
312 *cs++ = i915_scratch_offset(rq->i915) | PIPE_CONTROL_GLOBAL_GTT;
315 /* Finally we can flush and with it emit the breadcrumb */
316 *cs++ = GFX_OP_PIPE_CONTROL(4);
317 *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
318 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
319 PIPE_CONTROL_DC_FLUSH_ENABLE |
320 PIPE_CONTROL_QW_WRITE |
321 PIPE_CONTROL_CS_STALL);
322 *cs++ = intel_hws_seqno_address(rq->engine) | PIPE_CONTROL_GLOBAL_GTT;
323 *cs++ = rq->global_seqno;
325 *cs++ = MI_USER_INTERRUPT;
328 rq->tail = intel_ring_offset(rq, cs);
329 assert_ring_tail_valid(rq->ring, rq->tail);
335 gen7_render_ring_cs_stall_wa(struct i915_request *rq)
339 cs = intel_ring_begin(rq, 4);
343 *cs++ = GFX_OP_PIPE_CONTROL(4);
344 *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
347 intel_ring_advance(rq, cs);
353 gen7_render_ring_flush(struct i915_request *rq, u32 mode)
355 u32 scratch_addr = i915_scratch_offset(rq->i915) + 2 * CACHELINE_BYTES;
359 * Ensure that any following seqno writes only happen when the render
360 * cache is indeed flushed.
362 * Workaround: every 4th PIPE_CONTROL command (except the ones with only
363 * read-cache invalidate bits set) must have the CS_STALL bit set. We
364 * don't try to be clever and just set it unconditionally.
366 flags |= PIPE_CONTROL_CS_STALL;
368 /* Just flush everything. Experiments have shown that reducing the
369 * number of bits based on the write domains has little performance impact.
372 if (mode & EMIT_FLUSH) {
373 flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
374 flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
375 flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
376 flags |= PIPE_CONTROL_FLUSH_ENABLE;
378 if (mode & EMIT_INVALIDATE) {
379 flags |= PIPE_CONTROL_TLB_INVALIDATE;
380 flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
381 flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
382 flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
383 flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
384 flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
385 flags |= PIPE_CONTROL_MEDIA_STATE_CLEAR;
387 * TLB invalidate requires a post-sync write.
389 flags |= PIPE_CONTROL_QW_WRITE;
390 flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
392 flags |= PIPE_CONTROL_STALL_AT_SCOREBOARD;
394 /* Workaround: we must issue a pipe_control with CS-stall bit
395 * set before a pipe_control command that has the state cache
396 * invalidate bit set. */
397 gen7_render_ring_cs_stall_wa(rq);
400 cs = intel_ring_begin(rq, 4);
404 *cs++ = GFX_OP_PIPE_CONTROL(4);
406 *cs++ = scratch_addr;
408 intel_ring_advance(rq, cs);
413 static u32 *gen7_rcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
415 *cs++ = GFX_OP_PIPE_CONTROL(4);
416 *cs++ = (PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
417 PIPE_CONTROL_DEPTH_CACHE_FLUSH |
418 PIPE_CONTROL_DC_FLUSH_ENABLE |
419 PIPE_CONTROL_FLUSH_ENABLE |
420 PIPE_CONTROL_QW_WRITE |
421 PIPE_CONTROL_GLOBAL_GTT_IVB |
422 PIPE_CONTROL_CS_STALL);
423 *cs++ = intel_hws_seqno_address(rq->engine);
424 *cs++ = rq->global_seqno;
426 *cs++ = MI_USER_INTERRUPT;
429 rq->tail = intel_ring_offset(rq, cs);
430 assert_ring_tail_valid(rq->ring, rq->tail);
435 static u32 *gen6_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
437 *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW;
438 *cs++ = intel_hws_seqno_address(rq->engine) | MI_FLUSH_DW_USE_GTT;
439 *cs++ = rq->global_seqno;
440 *cs++ = MI_USER_INTERRUPT;
442 rq->tail = intel_ring_offset(rq, cs);
443 assert_ring_tail_valid(rq->ring, rq->tail);
448 #define GEN7_XCS_WA 32
449 static u32 *gen7_xcs_emit_breadcrumb(struct i915_request *rq, u32 *cs)
453 *cs++ = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW;
454 *cs++ = intel_hws_seqno_address(rq->engine) | MI_FLUSH_DW_USE_GTT;
455 *cs++ = rq->global_seqno;
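/*
 * Repeat the breadcrumb store into the status page GEN7_XCS_WA (32)
 * times before raising the user interrupt; presumably a workaround to
 * guarantee the seqno write is visible before the interrupt is
 * signalled (the exact erratum is not spelled out here).
 */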
457 for (i = 0; i < GEN7_XCS_WA; i++) {
458 *cs++ = MI_STORE_DWORD_INDEX;
459 *cs++ = I915_GEM_HWS_INDEX_ADDR;
460 *cs++ = rq->global_seqno;
467 *cs++ = MI_USER_INTERRUPT;
470 rq->tail = intel_ring_offset(rq, cs);
471 assert_ring_tail_valid(rq->ring, rq->tail);
477 static void set_hwstam(struct intel_engine_cs *engine, u32 mask)
480 * Keep the render interrupt unmasked as this papers over
481 * lost interrupts following a reset.
483 if (engine->class == RENDER_CLASS) {
484 if (INTEL_GEN(engine->i915) >= 6)
487 mask &= ~I915_USER_INTERRUPT;
490 intel_engine_set_hwsp_writemask(engine, mask);
493 static void set_hws_pga(struct intel_engine_cs *engine, phys_addr_t phys)
495 struct drm_i915_private *dev_priv = engine->i915;
498 addr = lower_32_bits(phys);
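/*
 * On gen4+ the register can also carry the upper physical address
 * bits: the shift/mask below folds bits [35:32] of the page address
 * into bits [7:4] of the value written (as read from the code; see
 * the hardware docs for the authoritative register layout).
 */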
499 if (INTEL_GEN(dev_priv) >= 4)
500 addr |= (phys >> 28) & 0xf0;
502 I915_WRITE(HWS_PGA, addr);
505 static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
507 struct page *page = virt_to_page(engine->status_page.page_addr);
508 phys_addr_t phys = PFN_PHYS(page_to_pfn(page));
510 set_hws_pga(engine, phys);
511 set_hwstam(engine, ~0u);
514 static void set_hwsp(struct intel_engine_cs *engine, u32 offset)
516 struct drm_i915_private *dev_priv = engine->i915;
520 * The ring status page addresses are no longer next to the rest of
521 * the ring registers as of gen7.
523 if (IS_GEN(dev_priv, 7)) {
524 switch (engine->id) {
526 * No more rings exist on Gen7. Default case is only to shut up
527 * gcc switch check warning.
530 GEM_BUG_ON(engine->id);
532 hwsp = RENDER_HWS_PGA_GEN7;
535 hwsp = BLT_HWS_PGA_GEN7;
538 hwsp = BSD_HWS_PGA_GEN7;
541 hwsp = VEBOX_HWS_PGA_GEN7;
544 } else if (IS_GEN(dev_priv, 6)) {
545 hwsp = RING_HWS_PGA_GEN6(engine->mmio_base);
547 hwsp = RING_HWS_PGA(engine->mmio_base);
550 I915_WRITE(hwsp, offset);
554 static void flush_cs_tlb(struct intel_engine_cs *engine)
556 struct drm_i915_private *dev_priv = engine->i915;
557 i915_reg_t instpm = RING_INSTPM(engine->mmio_base);
559 if (!IS_GEN_RANGE(dev_priv, 6, 7))
562 /* ring should be idle before issuing a sync flush */
563 WARN_ON((I915_READ_MODE(engine) & MODE_IDLE) == 0);
566 _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
568 if (intel_wait_for_register(dev_priv,
569 instpm, INSTPM_SYNC_FLUSH, 0,
571 DRM_ERROR("%s: wait for SyncFlush to complete for TLB invalidation timed out\n",
575 static void ring_setup_status_page(struct intel_engine_cs *engine)
577 set_hwsp(engine, engine->status_page.ggtt_offset);
578 set_hwstam(engine, ~0u);
580 flush_cs_tlb(engine);
583 static bool stop_ring(struct intel_engine_cs *engine)
585 struct drm_i915_private *dev_priv = engine->i915;
587 if (INTEL_GEN(dev_priv) > 2) {
588 I915_WRITE_MODE(engine, _MASKED_BIT_ENABLE(STOP_RING));
589 if (intel_wait_for_register(dev_priv,
590 RING_MI_MODE(engine->mmio_base),
594 DRM_ERROR("%s: timed out trying to stop ring\n",
596 /* Sometimes we observe that the idle flag is not
597 * set even though the ring is empty. So double
598 * check before giving up.
600 if (I915_READ_HEAD(engine) != I915_READ_TAIL(engine))
605 I915_WRITE_HEAD(engine, I915_READ_TAIL(engine));
607 I915_WRITE_HEAD(engine, 0);
608 I915_WRITE_TAIL(engine, 0);
610 /* The ring must be empty before it is disabled */
611 I915_WRITE_CTL(engine, 0);
613 return (I915_READ_HEAD(engine) & HEAD_ADDR) == 0;
616 static int init_ring_common(struct intel_engine_cs *engine)
618 struct drm_i915_private *dev_priv = engine->i915;
619 struct intel_ring *ring = engine->buffer;
622 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
624 if (!stop_ring(engine)) {
625 /* G45 ring initialization often fails to reset head to zero */
626 DRM_DEBUG_DRIVER("%s head not reset to zero "
627 "ctl %08x head %08x tail %08x start %08x\n",
629 I915_READ_CTL(engine),
630 I915_READ_HEAD(engine),
631 I915_READ_TAIL(engine),
632 I915_READ_START(engine));
634 if (!stop_ring(engine)) {
635 DRM_ERROR("failed to set %s head to zero "
636 "ctl %08x head %08x tail %08x start %08x\n",
638 I915_READ_CTL(engine),
639 I915_READ_HEAD(engine),
640 I915_READ_TAIL(engine),
641 I915_READ_START(engine));
647 if (HWS_NEEDS_PHYSICAL(dev_priv))
648 ring_setup_phys_status_page(engine);
650 ring_setup_status_page(engine);
652 intel_engine_reset_breadcrumbs(engine);
654 /* Enforce ordering by reading HEAD register back */
655 I915_READ_HEAD(engine);
657 /* Initialize the ring. This must happen _after_ we've cleared the ring
658 * registers with the above sequence (the readback of the HEAD registers
659 * also enforces ordering), otherwise the hw might lose the new ring
660 * register values. */
661 I915_WRITE_START(engine, i915_ggtt_offset(ring->vma));
663 /* WaClearRingBufHeadRegAtInit:ctg,elk */
664 if (I915_READ_HEAD(engine))
665 DRM_DEBUG_DRIVER("%s initialization failed [head=%08x], fudging\n",
666 engine->name, I915_READ_HEAD(engine));
668 /* Check that the ring offsets point within the ring! */
669 GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->head));
670 GEM_BUG_ON(!intel_ring_offset_valid(ring, ring->tail));
671 intel_ring_update_space(ring);
673 /* First wake the ring up to an empty/idle ring */
674 I915_WRITE_HEAD(engine, ring->head);
675 I915_WRITE_TAIL(engine, ring->head);
676 (void)I915_READ_TAIL(engine);
678 I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
680 /* If the ring still does not report VALID, it is dead */
681 if (intel_wait_for_register(dev_priv, RING_CTL(engine->mmio_base),
682 RING_VALID, RING_VALID,
684 DRM_ERROR("%s initialization failed "
685 "ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
687 I915_READ_CTL(engine),
688 I915_READ_CTL(engine) & RING_VALID,
689 I915_READ_HEAD(engine), ring->head,
690 I915_READ_TAIL(engine), ring->tail,
691 I915_READ_START(engine),
692 i915_ggtt_offset(ring->vma));
697 if (INTEL_GEN(dev_priv) > 2)
698 I915_WRITE_MODE(engine, _MASKED_BIT_DISABLE(STOP_RING));
700 /* Now awake, let it get started */
701 if (ring->tail != ring->head) {
702 I915_WRITE_TAIL(engine, ring->tail);
703 (void)I915_READ_TAIL(engine);
706 /* Papering over lost _interrupts_ immediately following the restart */
707 intel_engine_wakeup(engine);
709 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
714 static struct i915_request *reset_prepare(struct intel_engine_cs *engine)
716 intel_engine_stop_cs(engine);
717 return i915_gem_find_active_request(engine);
720 static void skip_request(struct i915_request *rq)
722 void *vaddr = rq->ring->vaddr;
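/*
 * Overwrite the hung request's payload with MI_NOOPs up to its
 * breadcrumb (postfix). If the payload wrapped past the end of the
 * ring, clear the tail segment first and then continue from the
 * start of the ring.
 */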
726 if (rq->postfix < head) {
727 memset32(vaddr + head, MI_NOOP,
728 (rq->ring->size - head) / sizeof(u32));
731 memset32(vaddr + head, MI_NOOP, (rq->postfix - head) / sizeof(u32));
734 static void reset_ring(struct intel_engine_cs *engine, struct i915_request *rq)
736 GEM_TRACE("%s request global=%d, current=%d\n",
737 engine->name, rq ? rq->global_seqno : 0,
738 intel_engine_get_seqno(engine));
741 * Try to restore the logical GPU state to match the continuation
742 * of the request queue. If we skip the context/PD restore, then
743 * the next request may try to execute assuming that its context
744 * is valid and loaded on the GPU and so may try to access invalid
745 * memory, prompting repeated GPU hangs.
747 * If the request was guilty, we still restore the logical state
748 * in case the next request requires it (e.g. the aliasing ppgtt),
749 * but skip over the hung batch.
751 * If the request was innocent, we try to replay the request with
752 * the restored context.
755 /* If the rq hung, jump to its breadcrumb and skip the batch */
756 rq->ring->head = intel_ring_wrap(rq->ring, rq->head);
757 if (rq->fence.error == -EIO)
762 static void reset_finish(struct intel_engine_cs *engine)
766 static int intel_rcs_ctx_init(struct i915_request *rq)
770 ret = intel_engine_emit_ctx_wa(rq);
774 ret = i915_gem_render_state_emit(rq);
781 static int init_render_ring(struct intel_engine_cs *engine)
783 struct drm_i915_private *dev_priv = engine->i915;
784 int ret = init_ring_common(engine);
788 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
789 if (IS_GEN_RANGE(dev_priv, 4, 6))
790 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
792 /* We need to disable the AsyncFlip performance optimisations in order
793 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
794 * programmed to '1' on all products.
796 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
798 if (IS_GEN_RANGE(dev_priv, 6, 7))
799 I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
801 /* Required for the hardware to program scanline values for waiting */
802 /* WaEnableFlushTlbInvalidationMode:snb */
803 if (IS_GEN(dev_priv, 6))
805 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT));
807 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
808 if (IS_GEN(dev_priv, 7))
809 I915_WRITE(GFX_MODE_GEN7,
810 _MASKED_BIT_ENABLE(GFX_TLB_INVALIDATE_EXPLICIT) |
811 _MASKED_BIT_ENABLE(GFX_REPLAY_MODE));
813 if (IS_GEN(dev_priv, 6)) {
814 /* From the Sandybridge PRM, volume 1 part 3, page 24:
815 * "If this bit is set, STCunit will have LRA as replacement
816 * policy. [...] This bit must be reset. LRA replacement
817 * policy is not supported."
819 I915_WRITE(CACHE_MODE_0,
820 _MASKED_BIT_DISABLE(CM0_STC_EVICT_DISABLE_LRA_SNB));
823 if (IS_GEN_RANGE(dev_priv, 6, 7))
824 I915_WRITE(INSTPM, _MASKED_BIT_ENABLE(INSTPM_FORCE_ORDERING));
826 if (INTEL_GEN(dev_priv) >= 6)
827 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
832 static void cancel_requests(struct intel_engine_cs *engine)
834 struct i915_request *request;
837 spin_lock_irqsave(&engine->timeline.lock, flags);
839 /* Mark all submitted requests as skipped. */
840 list_for_each_entry(request, &engine->timeline.requests, link) {
841 GEM_BUG_ON(!request->global_seqno);
843 if (i915_request_signaled(request))
846 dma_fence_set_error(&request->fence, -EIO);
849 intel_write_status_page(engine,
851 intel_engine_last_submit(engine));
853 /* Remaining _unready_ requests will be nop'ed when submitted */
855 spin_unlock_irqrestore(&engine->timeline.lock, flags);
858 static void i9xx_submit_request(struct i915_request *request)
860 struct drm_i915_private *dev_priv = request->i915;
862 i915_request_submit(request);
864 I915_WRITE_TAIL(request->engine,
865 intel_ring_set_tail(request->ring, request->tail));
868 static u32 *i9xx_emit_breadcrumb(struct i915_request *rq, u32 *cs)
872 *cs++ = MI_STORE_DWORD_INDEX;
873 *cs++ = I915_GEM_HWS_INDEX_ADDR;
874 *cs++ = rq->global_seqno;
876 *cs++ = MI_USER_INTERRUPT;
879 rq->tail = intel_ring_offset(rq, cs);
880 assert_ring_tail_valid(rq->ring, rq->tail);
885 #define GEN5_WA_STORES 8 /* must be at least 1! */
886 static u32 *gen5_emit_breadcrumb(struct i915_request *rq, u32 *cs)
892 BUILD_BUG_ON(GEN5_WA_STORES < 1);
893 for (i = 0; i < GEN5_WA_STORES; i++) {
894 *cs++ = MI_STORE_DWORD_INDEX;
895 *cs++ = I915_GEM_HWS_INDEX_ADDR;
896 *cs++ = rq->global_seqno;
899 *cs++ = MI_USER_INTERRUPT;
901 rq->tail = intel_ring_offset(rq, cs);
902 assert_ring_tail_valid(rq->ring, rq->tail);
906 #undef GEN5_WA_STORES
909 gen5_irq_enable(struct intel_engine_cs *engine)
911 gen5_enable_gt_irq(engine->i915, engine->irq_enable_mask);
915 gen5_irq_disable(struct intel_engine_cs *engine)
917 gen5_disable_gt_irq(engine->i915, engine->irq_enable_mask);
921 i9xx_irq_enable(struct intel_engine_cs *engine)
923 struct drm_i915_private *dev_priv = engine->i915;
925 dev_priv->irq_mask &= ~engine->irq_enable_mask;
926 I915_WRITE(IMR, dev_priv->irq_mask);
927 POSTING_READ_FW(RING_IMR(engine->mmio_base));
931 i9xx_irq_disable(struct intel_engine_cs *engine)
933 struct drm_i915_private *dev_priv = engine->i915;
935 dev_priv->irq_mask |= engine->irq_enable_mask;
936 I915_WRITE(IMR, dev_priv->irq_mask);
940 i8xx_irq_enable(struct intel_engine_cs *engine)
942 struct drm_i915_private *dev_priv = engine->i915;
944 dev_priv->irq_mask &= ~engine->irq_enable_mask;
945 I915_WRITE16(IMR, dev_priv->irq_mask);
946 POSTING_READ16(RING_IMR(engine->mmio_base));
950 i8xx_irq_disable(struct intel_engine_cs *engine)
952 struct drm_i915_private *dev_priv = engine->i915;
954 dev_priv->irq_mask |= engine->irq_enable_mask;
955 I915_WRITE16(IMR, dev_priv->irq_mask);
959 bsd_ring_flush(struct i915_request *rq, u32 mode)
963 cs = intel_ring_begin(rq, 2);
969 intel_ring_advance(rq, cs);
974 gen6_irq_enable(struct intel_engine_cs *engine)
976 struct drm_i915_private *dev_priv = engine->i915;
978 I915_WRITE_IMR(engine,
979 ~(engine->irq_enable_mask |
980 engine->irq_keep_mask));
982 /* Flush/delay to ensure the RING_IMR is active before the GT IMR */
983 POSTING_READ_FW(RING_IMR(engine->mmio_base));
985 gen5_enable_gt_irq(dev_priv, engine->irq_enable_mask);
989 gen6_irq_disable(struct intel_engine_cs *engine)
991 struct drm_i915_private *dev_priv = engine->i915;
993 I915_WRITE_IMR(engine, ~engine->irq_keep_mask);
994 gen5_disable_gt_irq(dev_priv, engine->irq_enable_mask);
998 hsw_vebox_irq_enable(struct intel_engine_cs *engine)
1000 struct drm_i915_private *dev_priv = engine->i915;
1002 I915_WRITE_IMR(engine, ~engine->irq_enable_mask);
1004 /* Flush/delay to ensure the RING_IMR is active before the GT IMR */
1005 POSTING_READ_FW(RING_IMR(engine->mmio_base));
1007 gen6_unmask_pm_irq(dev_priv, engine->irq_enable_mask);
1011 hsw_vebox_irq_disable(struct intel_engine_cs *engine)
1013 struct drm_i915_private *dev_priv = engine->i915;
1015 I915_WRITE_IMR(engine, ~0);
1016 gen6_mask_pm_irq(dev_priv, engine->irq_enable_mask);
1020 i965_emit_bb_start(struct i915_request *rq,
1021 u64 offset, u32 length,
1022 unsigned int dispatch_flags)
1026 cs = intel_ring_begin(rq, 2);
1030 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT | (dispatch_flags &
1031 I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE_I965);
1033 intel_ring_advance(rq, cs);
1038 /* Just userspace ABI convention to limit the wa batch bo to a reasonable size */
1039 #define I830_BATCH_LIMIT SZ_256K
1040 #define I830_TLB_ENTRIES (2)
1041 #define I830_WA_SIZE max(I830_TLB_ENTRIES*4096, I830_BATCH_LIMIT)
1043 i830_emit_bb_start(struct i915_request *rq,
1044 u64 offset, u32 len,
1045 unsigned int dispatch_flags)
1047 u32 *cs, cs_offset = i915_scratch_offset(rq->i915);
1049 GEM_BUG_ON(rq->i915->gt.scratch->size < I830_WA_SIZE);
1051 cs = intel_ring_begin(rq, 6);
1055 /* Evict the invalid PTE TLBs */
1056 *cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
1057 *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
1058 *cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
1062 intel_ring_advance(rq, cs);
1064 if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
1065 if (len > I830_BATCH_LIMIT)
1068 cs = intel_ring_begin(rq, 6 + 2);
1072 /* Blit the batch (which now has all relocs applied) to the
1073 * stable batch scratch bo area (so that the CS never
1074 * stumbles over its tlb invalidation bug) ...
1076 *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
1077 *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
1078 *cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
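/* i.e. blit DIV_ROUND_UP(len, 4096) rows of 4096 bytes, copying the batch page by page */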
1085 intel_ring_advance(rq, cs);
1087 /* ... and execute it. */
1091 cs = intel_ring_begin(rq, 2);
1095 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
1096 *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
1097 MI_BATCH_NON_SECURE);
1098 intel_ring_advance(rq, cs);
1104 i915_emit_bb_start(struct i915_request *rq,
1105 u64 offset, u32 len,
1106 unsigned int dispatch_flags)
1110 cs = intel_ring_begin(rq, 2);
1114 *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
1115 *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
1116 MI_BATCH_NON_SECURE);
1117 intel_ring_advance(rq, cs);
1122 int intel_ring_pin(struct intel_ring *ring)
1124 struct i915_vma *vma = ring->vma;
1125 enum i915_map_type map = i915_coherent_map_type(vma->vm->i915);
1130 GEM_BUG_ON(ring->vaddr);
1134 /* Ring wraparound at offset 0 sometimes hangs. No idea why. */
1135 flags |= PIN_OFFSET_BIAS | i915_ggtt_pin_bias(vma);
1137 if (vma->obj->stolen)
1138 flags |= PIN_MAPPABLE;
1142 if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
1143 if (flags & PIN_MAPPABLE || map == I915_MAP_WC)
1144 ret = i915_gem_object_set_to_gtt_domain(vma->obj, true);
1146 ret = i915_gem_object_set_to_cpu_domain(vma->obj, true);
1151 ret = i915_vma_pin(vma, 0, 0, flags);
1155 if (i915_vma_is_map_and_fenceable(vma))
1156 addr = (void __force *)i915_vma_pin_iomap(vma);
1158 addr = i915_gem_object_pin_map(vma->obj, map);
1162 vma->obj->pin_global++;
1168 i915_vma_unpin(vma);
1169 return PTR_ERR(addr);
1172 void intel_ring_reset(struct intel_ring *ring, u32 tail)
1174 GEM_BUG_ON(!intel_ring_offset_valid(ring, tail));
1179 intel_ring_update_space(ring);
1182 void intel_ring_unpin(struct intel_ring *ring)
1184 GEM_BUG_ON(!ring->vma);
1185 GEM_BUG_ON(!ring->vaddr);
1187 /* Discard any unused bytes beyond that submitted to hw. */
1188 intel_ring_reset(ring, ring->tail);
1190 if (i915_vma_is_map_and_fenceable(ring->vma))
1191 i915_vma_unpin_iomap(ring->vma);
1193 i915_gem_object_unpin_map(ring->vma->obj);
1196 ring->vma->obj->pin_global--;
1197 i915_vma_unpin(ring->vma);
1200 static struct i915_vma *
1201 intel_ring_create_vma(struct drm_i915_private *dev_priv, int size)
1203 struct i915_address_space *vm = &dev_priv->ggtt.vm;
1204 struct drm_i915_gem_object *obj;
1205 struct i915_vma *vma;
1207 obj = i915_gem_object_create_stolen(dev_priv, size);
1209 obj = i915_gem_object_create_internal(dev_priv, size);
1211 return ERR_CAST(obj);
1214 * Mark ring buffers as read-only from GPU side (so no stray overwrites)
1215 * if supported by the platform's GGTT.
1217 if (vm->has_read_only)
1218 i915_gem_object_set_readonly(obj);
1220 vma = i915_vma_instance(obj, vm, NULL);
1227 i915_gem_object_put(obj);
1232 intel_engine_create_ring(struct intel_engine_cs *engine,
1233 struct i915_timeline *timeline,
1236 struct intel_ring *ring;
1237 struct i915_vma *vma;
1239 GEM_BUG_ON(!is_power_of_2(size));
1240 GEM_BUG_ON(RING_CTL_SIZE(size) & ~RING_NR_PAGES);
1241 GEM_BUG_ON(timeline == &engine->timeline);
1242 lockdep_assert_held(&engine->i915->drm.struct_mutex);
1244 ring = kzalloc(sizeof(*ring), GFP_KERNEL);
1246 return ERR_PTR(-ENOMEM);
1248 INIT_LIST_HEAD(&ring->request_list);
1249 ring->timeline = i915_timeline_get(timeline);
1252 /* Workaround an erratum on the i830 which causes a hang if
1253 * the TAIL pointer points to within the last 2 cachelines
1256 ring->effective_size = size;
1257 if (IS_I830(engine->i915) || IS_I845G(engine->i915))
1258 ring->effective_size -= 2 * CACHELINE_BYTES;
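/*
 * e.g. with 64 byte cachelines a 16 KiB ring on i830/845g advertises
 * 16 KiB - 128 bytes of usable space, keeping the TAIL out of the
 * last two cachelines.
 */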
1260 intel_ring_update_space(ring);
1262 vma = intel_ring_create_vma(engine->i915, size);
1265 return ERR_CAST(vma);
1273 intel_ring_free(struct intel_ring *ring)
1275 struct drm_i915_gem_object *obj = ring->vma->obj;
1277 i915_vma_close(ring->vma);
1278 __i915_gem_object_release_unless_active(obj);
1280 i915_timeline_put(ring->timeline);
1284 static void intel_ring_context_destroy(struct intel_context *ce)
1286 GEM_BUG_ON(ce->pin_count);
1291 GEM_BUG_ON(i915_gem_object_is_active(ce->state->obj));
1292 i915_gem_object_put(ce->state->obj);
1295 static int __context_pin_ppgtt(struct i915_gem_context *ctx)
1297 struct i915_hw_ppgtt *ppgtt;
1300 ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
1302 err = gen6_ppgtt_pin(ppgtt);
1307 static void __context_unpin_ppgtt(struct i915_gem_context *ctx)
1309 struct i915_hw_ppgtt *ppgtt;
1311 ppgtt = ctx->ppgtt ?: ctx->i915->mm.aliasing_ppgtt;
1313 gen6_ppgtt_unpin(ppgtt);
1316 static int __context_pin(struct intel_context *ce)
1318 struct i915_vma *vma;
1326 * Clear this page out of any CPU caches for coherent swap-in/out.
1327 * We only want to do this on the first bind so that we do not stall
1328 * on an active context (which by nature is already on the GPU).
1330 if (!(vma->flags & I915_VMA_GLOBAL_BIND)) {
1331 err = i915_gem_object_set_to_gtt_domain(vma->obj, true);
1336 err = i915_vma_pin(vma, 0, 0, PIN_GLOBAL | PIN_HIGH);
1341 * And mark it as a globally pinned object to let the shrinker know
1342 * it cannot reclaim the object until we release it.
1344 vma->obj->pin_global++;
1349 static void __context_unpin(struct intel_context *ce)
1351 struct i915_vma *vma;
1357 vma->obj->pin_global--;
1358 i915_vma_unpin(vma);
1361 static void intel_ring_context_unpin(struct intel_context *ce)
1363 __context_unpin_ppgtt(ce->gem_context);
1364 __context_unpin(ce);
1366 i915_gem_context_put(ce->gem_context);
1369 static struct i915_vma *
1370 alloc_context_vma(struct intel_engine_cs *engine)
1372 struct drm_i915_private *i915 = engine->i915;
1373 struct drm_i915_gem_object *obj;
1374 struct i915_vma *vma;
1377 obj = i915_gem_object_create(i915, engine->context_size);
1379 return ERR_CAST(obj);
1381 if (engine->default_state) {
1382 void *defaults, *vaddr;
1384 vaddr = i915_gem_object_pin_map(obj, I915_MAP_WB);
1385 if (IS_ERR(vaddr)) {
1386 err = PTR_ERR(vaddr);
1390 defaults = i915_gem_object_pin_map(engine->default_state,
1392 if (IS_ERR(defaults)) {
1393 err = PTR_ERR(defaults);
1397 memcpy(vaddr, defaults, engine->context_size);
1399 i915_gem_object_unpin_map(engine->default_state);
1400 i915_gem_object_unpin_map(obj);
1404 * Try to make the context utilize L3 as well as LLC.
1406 * On VLV we don't have L3 controls in the PTEs so we
1407 * shouldn't touch the cache level, especially as that
1408 * would make the object snooped which might have a
1409 * negative performance impact.
1411 * Snooping is required on non-llc platforms in execlist
1412 * mode, but since all GGTT accesses use PAT entry 0 we
1413 * get snooping anyway regardless of cache_level.
1415 * This is only applicable for Ivy Bridge devices since
1416 * later platforms don't have L3 control bits in the PTE.
1418 if (IS_IVYBRIDGE(i915)) {
1419 /* Ignore any error, regard it as a simple optimisation */
1420 i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
1423 vma = i915_vma_instance(obj, &i915->ggtt.vm, NULL);
1432 i915_gem_object_unpin_map(obj);
1434 i915_gem_object_put(obj);
1435 return ERR_PTR(err);
1438 static struct intel_context *
1439 __ring_context_pin(struct intel_engine_cs *engine,
1440 struct i915_gem_context *ctx,
1441 struct intel_context *ce)
1445 if (!ce->state && engine->context_size) {
1446 struct i915_vma *vma;
1448 vma = alloc_context_vma(engine);
1457 err = __context_pin(ce);
1461 err = __context_pin_ppgtt(ce->gem_context);
1465 i915_gem_context_get(ctx);
1467 /* One ringbuffer to rule them all */
1468 GEM_BUG_ON(!engine->buffer);
1469 ce->ring = engine->buffer;
1474 __context_unpin(ce);
1477 return ERR_PTR(err);
1480 static const struct intel_context_ops ring_context_ops = {
1481 .unpin = intel_ring_context_unpin,
1482 .destroy = intel_ring_context_destroy,
1485 static struct intel_context *
1486 intel_ring_context_pin(struct intel_engine_cs *engine,
1487 struct i915_gem_context *ctx)
1489 struct intel_context *ce = to_intel_context(ctx, engine);
1491 lockdep_assert_held(&ctx->i915->drm.struct_mutex);
1493 if (likely(ce->pin_count++))
1495 GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
1497 ce->ops = &ring_context_ops;
1499 return __ring_context_pin(engine, ctx, ce);
1502 static int intel_init_ring_buffer(struct intel_engine_cs *engine)
1504 struct i915_timeline *timeline;
1505 struct intel_ring *ring;
1508 intel_engine_setup_common(engine);
1510 timeline = i915_timeline_create(engine->i915, engine->name);
1511 if (IS_ERR(timeline)) {
1512 err = PTR_ERR(timeline);
1516 ring = intel_engine_create_ring(engine, timeline, 32 * PAGE_SIZE);
1517 i915_timeline_put(timeline);
1519 err = PTR_ERR(ring);
1523 err = intel_ring_pin(ring);
1527 GEM_BUG_ON(engine->buffer);
1528 engine->buffer = ring;
1530 err = intel_engine_init_common(engine);
1537 intel_ring_unpin(ring);
1539 intel_ring_free(ring);
1541 intel_engine_cleanup_common(engine);
1545 void intel_engine_cleanup(struct intel_engine_cs *engine)
1547 struct drm_i915_private *dev_priv = engine->i915;
1549 WARN_ON(INTEL_GEN(dev_priv) > 2 &&
1550 (I915_READ_MODE(engine) & MODE_IDLE) == 0);
1552 intel_ring_unpin(engine->buffer);
1553 intel_ring_free(engine->buffer);
1555 if (engine->cleanup)
1556 engine->cleanup(engine);
1558 intel_engine_cleanup_common(engine);
1560 dev_priv->engine[engine->id] = NULL;
1564 void intel_legacy_submission_resume(struct drm_i915_private *dev_priv)
1566 struct intel_engine_cs *engine;
1567 enum intel_engine_id id;
1569 /* Restart from the beginning of the rings for convenience */
1570 for_each_engine(engine, dev_priv, id)
1571 intel_ring_reset(engine->buffer, 0);
1574 static int load_pd_dir(struct i915_request *rq,
1575 const struct i915_hw_ppgtt *ppgtt)
1577 const struct intel_engine_cs * const engine = rq->engine;
1580 cs = intel_ring_begin(rq, 6);
1584 *cs++ = MI_LOAD_REGISTER_IMM(1);
1585 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_DCLV(engine));
1586 *cs++ = PP_DIR_DCLV_2G;
1588 *cs++ = MI_LOAD_REGISTER_IMM(1);
1589 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
1590 *cs++ = ppgtt->pd.base.ggtt_offset << 10;
1592 intel_ring_advance(rq, cs);
1597 static int flush_pd_dir(struct i915_request *rq)
1599 const struct intel_engine_cs * const engine = rq->engine;
1602 cs = intel_ring_begin(rq, 4);
1606 /* Stall until the page table load is complete */
1607 *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
1608 *cs++ = i915_mmio_reg_offset(RING_PP_DIR_BASE(engine));
1609 *cs++ = i915_scratch_offset(rq->i915);
1612 intel_ring_advance(rq, cs);
1616 static inline int mi_set_context(struct i915_request *rq, u32 flags)
1618 struct drm_i915_private *i915 = rq->i915;
1619 struct intel_engine_cs *engine = rq->engine;
1620 enum intel_engine_id id;
1621 const int num_rings =
1622 IS_HSW_GT1(i915) ? RUNTIME_INFO(i915)->num_rings - 1 : 0;
1623 bool force_restore = false;
1627 flags |= MI_MM_SPACE_GTT;
1628 if (IS_HASWELL(i915))
1629 /* These flags are for resource streamer on HSW+ */
1630 flags |= HSW_MI_RS_SAVE_STATE_EN | HSW_MI_RS_RESTORE_STATE_EN;
1632 flags |= MI_SAVE_EXT_STATE_EN | MI_RESTORE_EXT_STATE_EN;
1635 if (IS_GEN(i915, 7))
1636 len += 2 + (num_rings ? 4*num_rings + 6 : 0);
1637 if (flags & MI_FORCE_RESTORE) {
1638 GEM_BUG_ON(flags & MI_RESTORE_INHIBIT);
1639 flags &= ~MI_FORCE_RESTORE;
1640 force_restore = true;
1644 cs = intel_ring_begin(rq, len);
1648 /* WaProgramMiArbOnOffAroundMiSetContext:ivb,vlv,hsw,bdw,chv */
1649 if (IS_GEN(i915, 7)) {
1650 *cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
1652 struct intel_engine_cs *signaller;
1654 *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
1655 for_each_engine(signaller, i915, id) {
1656 if (signaller == engine)
1659 *cs++ = i915_mmio_reg_offset(
1660 RING_PSMI_CTL(signaller->mmio_base));
1661 *cs++ = _MASKED_BIT_ENABLE(
1662 GEN6_PSMI_SLEEP_MSG_DISABLE);
1667 if (force_restore) {
1669 * The HW doesn't handle being told to restore the current
1670 * context very well. Quite often it likes to go off and
1671 * sulk, especially when it is meant to be reloading PP_DIR.
1672 * A very simple fix to force the reload is to simply switch
1673 * away from the current context and back again.
1675 * Note that the kernel_context will contain random state
1676 * following the INHIBIT_RESTORE. We accept this since we
1677 * never use the kernel_context state; it is merely a
1678 * placeholder we use to flush other contexts.
1680 *cs++ = MI_SET_CONTEXT;
1681 *cs++ = i915_ggtt_offset(to_intel_context(i915->kernel_context,
1688 *cs++ = MI_SET_CONTEXT;
1689 *cs++ = i915_ggtt_offset(rq->hw_context->state) | flags;
1691 * w/a: MI_SET_CONTEXT must always be followed by MI_NOOP
1692 * WaMiSetContext_Hang:snb,ivb,vlv
1696 if (IS_GEN(i915, 7)) {
1698 struct intel_engine_cs *signaller;
1699 i915_reg_t last_reg = {}; /* keep gcc quiet */
1701 *cs++ = MI_LOAD_REGISTER_IMM(num_rings);
1702 for_each_engine(signaller, i915, id) {
1703 if (signaller == engine)
1706 last_reg = RING_PSMI_CTL(signaller->mmio_base);
1707 *cs++ = i915_mmio_reg_offset(last_reg);
1708 *cs++ = _MASKED_BIT_DISABLE(
1709 GEN6_PSMI_SLEEP_MSG_DISABLE);
1712 /* Insert a delay before the next switch! */
1713 *cs++ = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
1714 *cs++ = i915_mmio_reg_offset(last_reg);
1715 *cs++ = i915_scratch_offset(rq->i915);
1718 *cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
1721 intel_ring_advance(rq, cs);
1726 static int remap_l3(struct i915_request *rq, int slice)
1728 u32 *cs, *remap_info = rq->i915->l3_parity.remap_info[slice];
1734 cs = intel_ring_begin(rq, GEN7_L3LOG_SIZE/4 * 2 + 2);
1739 * Note: We do not worry about the concurrent register cacheline hang
1740 * here because no other code should access these registers other than
1741 * at initialization time.
1743 *cs++ = MI_LOAD_REGISTER_IMM(GEN7_L3LOG_SIZE/4);
1744 for (i = 0; i < GEN7_L3LOG_SIZE/4; i++) {
1745 *cs++ = i915_mmio_reg_offset(GEN7_L3LOG(slice, i));
1746 *cs++ = remap_info[i];
1749 intel_ring_advance(rq, cs);
1754 static int switch_context(struct i915_request *rq)
1756 struct intel_engine_cs *engine = rq->engine;
1757 struct i915_gem_context *ctx = rq->gem_context;
1758 struct i915_hw_ppgtt *ppgtt = ctx->ppgtt ?: rq->i915->mm.aliasing_ppgtt;
1759 unsigned int unwind_mm = 0;
1763 lockdep_assert_held(&rq->i915->drm.struct_mutex);
1764 GEM_BUG_ON(HAS_EXECLISTS(rq->i915));
1770 * Baytrail takes a little more convincing that it really needs
1771 * to reload the PD between contexts. It is not simply a matter of
1772 * waiting a little longer, as adding more stalls after load_pd_dir
1773 * (i.e. a long loop around flush_pd_dir) is not as effective as
1774 * reloading the PD umpteen times. 32 is derived from
1775 * experimentation (gem_exec_parallel/fds) and has no good explanation beyond being a magic number.
1779 if (engine->id == BCS && IS_VALLEYVIEW(engine->i915))
1783 ret = load_pd_dir(rq, ppgtt);
1788 if (intel_engine_flag(engine) & ppgtt->pd_dirty_rings) {
1789 unwind_mm = intel_engine_flag(engine);
1790 ppgtt->pd_dirty_rings &= ~unwind_mm;
1791 hw_flags = MI_FORCE_RESTORE;
1795 if (rq->hw_context->state) {
1796 GEM_BUG_ON(engine->id != RCS);
1799 * The kernel context(s) is treated as pure scratch and is not
1800 * expected to retain any state (as we sacrifice it during
1801 * suspend and on resume it may be corrupted). This is ok,
1802 * as nothing actually executes using the kernel context; it
1803 * is purely used for flushing user contexts.
1805 if (i915_gem_context_is_kernel(ctx))
1806 hw_flags = MI_RESTORE_INHIBIT;
1808 ret = mi_set_context(rq, hw_flags);
1814 ret = engine->emit_flush(rq, EMIT_INVALIDATE);
1818 ret = flush_pd_dir(rq);
1823 * Not only do we need a full barrier (post-sync write) after
1824 * invalidating the TLBs, but we need to wait a little bit
1825 * longer. Whether this is merely delaying us, or the
1826 * subsequent flush is a key part of serialising with the
1827 * post-sync op, this extra pass appears vital before a
1830 ret = engine->emit_flush(rq, EMIT_INVALIDATE);
1834 ret = engine->emit_flush(rq, EMIT_FLUSH);
1839 if (ctx->remap_slice) {
1840 for (i = 0; i < MAX_L3_SLICES; i++) {
1841 if (!(ctx->remap_slice & BIT(i)))
1844 ret = remap_l3(rq, i);
1849 ctx->remap_slice = 0;
1856 ppgtt->pd_dirty_rings |= unwind_mm;
1861 static int ring_request_alloc(struct i915_request *request)
1865 GEM_BUG_ON(!request->hw_context->pin_count);
1868 * Flush enough space to reduce the likelihood of waiting after
1869 * we start building the request - in which case we will just
1870 * have to repeat work.
1872 request->reserved_space += LEGACY_REQUEST_SIZE;
1874 ret = switch_context(request);
1878 /* Unconditionally invalidate GPU caches and TLBs. */
1879 ret = request->engine->emit_flush(request, EMIT_INVALIDATE);
1883 request->reserved_space -= LEGACY_REQUEST_SIZE;
1887 static noinline int wait_for_space(struct intel_ring *ring, unsigned int bytes)
1889 struct i915_request *target;
1892 lockdep_assert_held(&ring->vma->vm->i915->drm.struct_mutex);
1894 if (intel_ring_update_space(ring) >= bytes)
1897 GEM_BUG_ON(list_empty(&ring->request_list));
1898 list_for_each_entry(target, &ring->request_list, ring_link) {
1899 /* Would completion of this request free enough space? */
1900 if (bytes <= __intel_ring_space(target->postfix,
1901 ring->emit, ring->size))
1905 if (WARN_ON(&target->ring_link == &ring->request_list))
1908 timeout = i915_request_wait(target,
1909 I915_WAIT_INTERRUPTIBLE | I915_WAIT_LOCKED,
1910 MAX_SCHEDULE_TIMEOUT);
1914 i915_request_retire_upto(target);
1916 intel_ring_update_space(ring);
1917 GEM_BUG_ON(ring->space < bytes);
1921 u32 *intel_ring_begin(struct i915_request *rq, unsigned int num_dwords)
1923 struct intel_ring *ring = rq->ring;
1924 const unsigned int remain_usable = ring->effective_size - ring->emit;
1925 const unsigned int bytes = num_dwords * sizeof(u32);
1926 unsigned int need_wrap = 0;
1927 unsigned int total_bytes;
1930 /* Packets must be qword aligned. */
1931 GEM_BUG_ON(num_dwords & 1);
1933 total_bytes = bytes + rq->reserved_space;
1934 GEM_BUG_ON(total_bytes > ring->effective_size);
1936 if (unlikely(total_bytes > remain_usable)) {
1937 const int remain_actual = ring->size - ring->emit;
1939 if (bytes > remain_usable) {
1941 * Not enough space for the basic request. So need to
1942 * flush out the remainder and then wait for
1945 total_bytes += remain_actual;
1946 need_wrap = remain_actual | 1;
1949 * The base request will fit but the reserved space
1950 * falls off the end. So we don't need an immediate
1951 * wrap and only need to effectively wait for the
1952 * reserved size from the start of ringbuffer.
1954 total_bytes = rq->reserved_space + remain_actual;
1958 if (unlikely(total_bytes > ring->space)) {
1962 * Space is reserved in the ringbuffer for finalising the
1963 * request, as that cannot be allowed to fail. During request
1964 * finalisation, reserved_space is set to 0 to stop the
1965 * overallocation and the assumption is that then we never need
1966 * to wait (which has the risk of failing with EINTR).
1968 * See also i915_request_alloc() and i915_request_add().
1970 GEM_BUG_ON(!rq->reserved_space);
1972 ret = wait_for_space(ring, total_bytes);
1974 return ERR_PTR(ret);
1977 if (unlikely(need_wrap)) {
1979 GEM_BUG_ON(need_wrap > ring->space);
1980 GEM_BUG_ON(ring->emit + need_wrap > ring->size);
1981 GEM_BUG_ON(!IS_ALIGNED(need_wrap, sizeof(u64)));
1983 /* Fill the tail with MI_NOOP */
1984 memset64(ring->vaddr + ring->emit, 0, need_wrap / sizeof(u64));
1985 ring->space -= need_wrap;
1989 GEM_BUG_ON(ring->emit > ring->size - bytes);
1990 GEM_BUG_ON(ring->space < bytes);
1991 cs = ring->vaddr + ring->emit;
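/*
 * In debug builds, poison the dwords we are about to hand out so that
 * any left unwritten by the caller stand out.
 */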
1992 GEM_DEBUG_EXEC(memset32(cs, POISON_INUSE, bytes / sizeof(*cs)));
1993 ring->emit += bytes;
1994 ring->space -= bytes;
1999 /* Align the ring tail to a cacheline boundary */
2000 int intel_ring_cacheline_align(struct i915_request *rq)
2005 num_dwords = (rq->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(u32);
2006 if (num_dwords == 0)
2009 num_dwords = CACHELINE_DWORDS - num_dwords;
2010 GEM_BUG_ON(num_dwords & 1);
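/*
 * Worked example (assuming 64 byte cachelines, i.e. 16 dwords): if the
 * tail currently sits 40 bytes (10 dwords) into a cacheline, we emit
 * 6 MI_NOOPs below so that the next command starts on a fresh
 * cacheline boundary.
 */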
2012 cs = intel_ring_begin(rq, num_dwords);
2016 memset64(cs, (u64)MI_NOOP << 32 | MI_NOOP, num_dwords / 2);
2017 intel_ring_advance(rq, cs);
2019 GEM_BUG_ON(rq->ring->emit & (CACHELINE_BYTES - 1));
2023 static void gen6_bsd_submit_request(struct i915_request *request)
2025 struct drm_i915_private *dev_priv = request->i915;
2027 intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
2029 /* Every tail move must follow the sequence below */
2031 /* Disable notification that the ring is IDLE. The GT
2032 * will then assume that it is busy and bring it out of rc6.
2034 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
2035 _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
2037 /* Clear the context id. Here be magic! */
2038 I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
2040 /* Wait for the ring not to be idle, i.e. for it to wake up. */
2041 if (__intel_wait_for_register_fw(dev_priv,
2042 GEN6_BSD_SLEEP_PSMI_CONTROL,
2043 GEN6_BSD_SLEEP_INDICATOR,
2046 DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
2048 /* Now that the ring is fully powered up, update the tail */
2049 i9xx_submit_request(request);
2051 /* Let the ring send IDLE messages to the GT again,
2052 * and so let it sleep to conserve power when idle.
2054 I915_WRITE_FW(GEN6_BSD_SLEEP_PSMI_CONTROL,
2055 _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE));
2057 intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
2060 static int mi_flush_dw(struct i915_request *rq, u32 flags)
2064 cs = intel_ring_begin(rq, 4);
2071 * We always require a command barrier so that subsequent
2072 * commands, such as breadcrumb interrupts, are strictly ordered
2073 * wrt the contents of the write cache being flushed to memory
2074 * (and thus being coherent from the CPU).
2076 cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;
2079 * Bspec vol 1c.3 - blitter engine command streamer:
2080 * "If ENABLED, all TLBs will be invalidated once the flush
2081 * operation is complete. This bit is only valid when the
2082 * Post-Sync Operation field is a value of 1h or 3h."
2087 *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
2091 intel_ring_advance(rq, cs);
2096 static int gen6_flush_dw(struct i915_request *rq, u32 mode, u32 invflags)
2098 return mi_flush_dw(rq, mode & EMIT_INVALIDATE ? invflags : 0);
2101 static int gen6_bsd_ring_flush(struct i915_request *rq, u32 mode)
2103 return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB | MI_INVALIDATE_BSD);
2107 hsw_emit_bb_start(struct i915_request *rq,
2108 u64 offset, u32 len,
2109 unsigned int dispatch_flags)
2113 cs = intel_ring_begin(rq, 2);
2117 *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
2118 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW);
2119 /* bits 0-7 are the length on GEN6+ */
2121 intel_ring_advance(rq, cs);
2127 gen6_emit_bb_start(struct i915_request *rq,
2128 u64 offset, u32 len,
2129 unsigned int dispatch_flags)
2133 cs = intel_ring_begin(rq, 2);
2137 *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
2138 0 : MI_BATCH_NON_SECURE_I965);
2139 /* bits 0-7 are the length on GEN6+ */
2141 intel_ring_advance(rq, cs);
2146 /* Blitter support (SandyBridge+) */
2148 static int gen6_ring_flush(struct i915_request *rq, u32 mode)
2150 return gen6_flush_dw(rq, mode, MI_INVALIDATE_TLB);
2153 static void intel_ring_init_irq(struct drm_i915_private *dev_priv,
2154 struct intel_engine_cs *engine)
2156 if (INTEL_GEN(dev_priv) >= 6) {
2157 engine->irq_enable = gen6_irq_enable;
2158 engine->irq_disable = gen6_irq_disable;
2159 } else if (INTEL_GEN(dev_priv) >= 5) {
2160 engine->irq_enable = gen5_irq_enable;
2161 engine->irq_disable = gen5_irq_disable;
2162 } else if (INTEL_GEN(dev_priv) >= 3) {
2163 engine->irq_enable = i9xx_irq_enable;
2164 engine->irq_disable = i9xx_irq_disable;
2166 engine->irq_enable = i8xx_irq_enable;
2167 engine->irq_disable = i8xx_irq_disable;
2171 static void i9xx_set_default_submission(struct intel_engine_cs *engine)
2173 engine->submit_request = i9xx_submit_request;
2174 engine->cancel_requests = cancel_requests;
2176 engine->park = NULL;
2177 engine->unpark = NULL;
2180 static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
2182 i9xx_set_default_submission(engine);
2183 engine->submit_request = gen6_bsd_submit_request;
2186 static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
2187 struct intel_engine_cs *engine)
2189 /* gen8+ are only supported with execlists */
2190 GEM_BUG_ON(INTEL_GEN(dev_priv) >= 8);
2192 intel_ring_init_irq(dev_priv, engine);
2194 engine->init_hw = init_ring_common;
2195 engine->reset.prepare = reset_prepare;
2196 engine->reset.reset = reset_ring;
2197 engine->reset.finish = reset_finish;
2199 engine->context_pin = intel_ring_context_pin;
2200 engine->request_alloc = ring_request_alloc;
2202 engine->emit_breadcrumb = i9xx_emit_breadcrumb;
2203 if (IS_GEN(dev_priv, 5))
2204 engine->emit_breadcrumb = gen5_emit_breadcrumb;
2206 engine->set_default_submission = i9xx_set_default_submission;
2208 if (INTEL_GEN(dev_priv) >= 6)
2209 engine->emit_bb_start = gen6_emit_bb_start;
2210 else if (INTEL_GEN(dev_priv) >= 4)
2211 engine->emit_bb_start = i965_emit_bb_start;
2212 else if (IS_I830(dev_priv) || IS_I845G(dev_priv))
2213 engine->emit_bb_start = i830_emit_bb_start;
2215 engine->emit_bb_start = i915_emit_bb_start;
2218 int intel_init_render_ring_buffer(struct intel_engine_cs *engine)
2220 struct drm_i915_private *dev_priv = engine->i915;
2223 intel_ring_default_vfuncs(dev_priv, engine);
2225 if (HAS_L3_DPF(dev_priv))
2226 engine->irq_keep_mask = GT_RENDER_L3_PARITY_ERROR_INTERRUPT;
2228 engine->irq_enable_mask = GT_RENDER_USER_INTERRUPT;
2230 if (INTEL_GEN(dev_priv) >= 7) {
2231 engine->init_context = intel_rcs_ctx_init;
2232 engine->emit_flush = gen7_render_ring_flush;
2233 engine->emit_breadcrumb = gen7_rcs_emit_breadcrumb;
2234 } else if (IS_GEN(dev_priv, 6)) {
2235 engine->init_context = intel_rcs_ctx_init;
2236 engine->emit_flush = gen6_render_ring_flush;
2237 engine->emit_breadcrumb = gen6_rcs_emit_breadcrumb;
2238 } else if (IS_GEN(dev_priv, 5)) {
2239 engine->emit_flush = gen4_render_ring_flush;
2241 if (INTEL_GEN(dev_priv) < 4)
2242 engine->emit_flush = gen2_render_ring_flush;
2244 engine->emit_flush = gen4_render_ring_flush;
2245 engine->irq_enable_mask = I915_USER_INTERRUPT;
2248 if (IS_HASWELL(dev_priv))
2249 engine->emit_bb_start = hsw_emit_bb_start;
2251 engine->init_hw = init_render_ring;
2253 ret = intel_init_ring_buffer(engine);
2260 int intel_init_bsd_ring_buffer(struct intel_engine_cs *engine)
2262 struct drm_i915_private *dev_priv = engine->i915;
2264 intel_ring_default_vfuncs(dev_priv, engine);
2266 if (INTEL_GEN(dev_priv) >= 6) {
2267 /* gen6 bsd needs a special wa for tail updates */
2268 if (IS_GEN(dev_priv, 6))
2269 engine->set_default_submission = gen6_bsd_set_default_submission;
2270 engine->emit_flush = gen6_bsd_ring_flush;
2271 engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
2273 if (IS_GEN(dev_priv, 6))
2274 engine->emit_breadcrumb = gen6_xcs_emit_breadcrumb;
2276 engine->emit_breadcrumb = gen7_xcs_emit_breadcrumb;
2278 engine->emit_flush = bsd_ring_flush;
2279 if (IS_GEN(dev_priv, 5))
2280 engine->irq_enable_mask = ILK_BSD_USER_INTERRUPT;
2282 engine->irq_enable_mask = I915_BSD_USER_INTERRUPT;
2285 return intel_init_ring_buffer(engine);
2288 int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
2290 struct drm_i915_private *dev_priv = engine->i915;
2292 GEM_BUG_ON(INTEL_GEN(dev_priv) < 6);
2294 intel_ring_default_vfuncs(dev_priv, engine);
2296 engine->emit_flush = gen6_ring_flush;
2297 engine->irq_enable_mask = GT_BLT_USER_INTERRUPT;
2299 if (IS_GEN(dev_priv, 6))
2300 engine->emit_breadcrumb = gen6_xcs_emit_breadcrumb;
2302 engine->emit_breadcrumb = gen7_xcs_emit_breadcrumb;
2304 return intel_init_ring_buffer(engine);
2307 int intel_init_vebox_ring_buffer(struct intel_engine_cs *engine)
2309 struct drm_i915_private *dev_priv = engine->i915;
2311 GEM_BUG_ON(INTEL_GEN(dev_priv) < 7);
2313 intel_ring_default_vfuncs(dev_priv, engine);
2315 engine->emit_flush = gen6_ring_flush;
2316 engine->irq_enable_mask = PM_VEBOX_USER_INTERRUPT;
2317 engine->irq_enable = hsw_vebox_irq_enable;
2318 engine->irq_disable = hsw_vebox_irq_disable;
2320 engine->emit_breadcrumb = gen7_xcs_emit_breadcrumb;
2322 return intel_init_ring_buffer(engine);