// SPDX-License-Identifier: MIT
/*
 * Copyright © 2014 Intel Corporation
 */

#include "gen8_engine_cs.h"
#include "i915_drv.h"
#include "intel_engine_regs.h"
#include "intel_gpu_commands.h"
#include "intel_lrc.h"
#include "intel_ring.h"

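/*
 * Note on the post-sync writes below: several of the PIPE_CONTROL and
 * MI_FLUSH_DW modes used in this file require a write destination even
 * though the value is discarded, so the emitters use the scratch slot in
 * the per-context PPHWSP (LRC_PPHWSP_SCRATCH_ADDR) as a dummy target.
 */
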
int gen8_emit_flush_rcs(struct i915_request *rq, u32 mode)
{
	bool vf_flush_wa = false, dc_flush_wa = false;
	u32 *cs, flags = 0;
	int len;

	flags |= PIPE_CONTROL_CS_STALL;

	if (mode & EMIT_FLUSH) {
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
	}

	if (mode & EMIT_INVALIDATE) {
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_STORE_DATA_INDEX;

		/*
		 * On GEN9: before VF_CACHE_INVALIDATE we need to emit a NULL
		 * pipe control.
		 */
		if (GRAPHICS_VER(rq->engine->i915) == 9)
			vf_flush_wa = true;

		/* WaForGAMHang:kbl */
		if (IS_KBL_GRAPHICS_STEP(rq->engine->i915, 0, STEP_C0))
			dc_flush_wa = true;
	}

	len = 6;
	if (vf_flush_wa)
		len += 6;
	if (dc_flush_wa)
		len += 12;

	cs = intel_ring_begin(rq, len);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	if (vf_flush_wa)
		cs = gen8_emit_pipe_control(cs, 0, 0);

	if (dc_flush_wa)
		cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_DC_FLUSH_ENABLE,
					    0);

	cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);

	if (dc_flush_wa)
		cs = gen8_emit_pipe_control(cs, PIPE_CONTROL_CS_STALL, 0);

	intel_ring_advance(rq, cs);

	return 0;
}

int gen8_emit_flush_xcs(struct i915_request *rq, u32 mode)
{
	u32 cmd, *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	cmd = MI_FLUSH_DW + 1;

	/*
	 * We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_INVALIDATE_TLB;
		if (rq->engine->class == VIDEO_DECODE_CLASS)
			cmd |= MI_INVALIDATE_BSD;
	}

	*cs++ = cmd;
	*cs++ = LRC_PPHWSP_SCRATCH_ADDR;
	*cs++ = 0; /* upper addr */
	*cs++ = 0; /* value */
	intel_ring_advance(rq, cs);

	return 0;
}

int gen11_emit_flush_rcs(struct i915_request *rq, u32 mode)
{
	if (mode & EMIT_FLUSH) {
		u32 *cs;
		u32 flags = 0;

		flags |= PIPE_CONTROL_CS_STALL;

		flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_STORE_DATA_INDEX;

		cs = intel_ring_begin(rq, 6);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
		intel_ring_advance(rq, cs);
	}

	if (mode & EMIT_INVALIDATE) {
		u32 *cs;
		u32 flags = 0;

		flags |= PIPE_CONTROL_CS_STALL;

		flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_QW_WRITE;
		flags |= PIPE_CONTROL_STORE_DATA_INDEX;

		cs = intel_ring_begin(rq, 6);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);
		intel_ring_advance(rq, cs);
	}

	return 0;
}

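/*
 * MI_ARB_CHECK on gen12 doubles as the pre-parser control: bit 8 flags the
 * dword as carrying a new pre-parser setting and bit 0 is the disable value
 * itself, so preparser_disable(true/false) yields a single command that
 * toggles command pre-fetching (used below around TLB invalidations).
 */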
static u32 preparser_disable(bool state)
{
	return MI_ARB_CHECK | 1 << 8 | state;
}

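/*
 * Emit a single LRI (with MMIO remap enabled) poking the engine's AUX_NV
 * register to invalidate the AUX table. This is needed on platforms without
 * flat CCS after a TLB invalidation; see the hsdes: 1809175790 call sites
 * below. The value written is assumed to be the AUX_INV trigger bit.
 */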
u32 *gen12_emit_aux_table_inv(u32 *cs, const i915_reg_t inv_reg)
{
	*cs++ = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN;
	*cs++ = i915_mmio_reg_offset(inv_reg);
	*cs++ = AUX_INV;
	*cs++ = MI_NOOP;

	return cs;
}

int gen12_emit_flush_rcs(struct i915_request *rq, u32 mode)
{
	struct intel_engine_cs *engine = rq->engine;

	if (mode & EMIT_FLUSH) {
		u32 flags = 0;
		u32 *cs;

		flags |= PIPE_CONTROL_TILE_CACHE_FLUSH;
		flags |= PIPE_CONTROL_FLUSH_L3;
		flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
		flags |= PIPE_CONTROL_DEPTH_CACHE_FLUSH;
		/* Wa_1409600907:tgl,adl-p */
		flags |= PIPE_CONTROL_DEPTH_STALL;
		flags |= PIPE_CONTROL_DC_FLUSH_ENABLE;
		flags |= PIPE_CONTROL_FLUSH_ENABLE;

		flags |= PIPE_CONTROL_STORE_DATA_INDEX;
		flags |= PIPE_CONTROL_QW_WRITE;

		flags |= PIPE_CONTROL_CS_STALL;

		if (engine->class == COMPUTE_CLASS)
			flags &= ~PIPE_CONTROL_3D_FLAGS;

		cs = intel_ring_begin(rq, 6);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		cs = gen12_emit_pipe_control(cs,
					     PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
					     flags, LRC_PPHWSP_SCRATCH_ADDR);
		intel_ring_advance(rq, cs);
	}

	if (mode & EMIT_INVALIDATE) {
		u32 flags = 0;
		u32 *cs, count;

		flags |= PIPE_CONTROL_COMMAND_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TLB_INVALIDATE;
		flags |= PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_VF_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_CONST_CACHE_INVALIDATE;
		flags |= PIPE_CONTROL_STATE_CACHE_INVALIDATE;

		flags |= PIPE_CONTROL_STORE_DATA_INDEX;
		flags |= PIPE_CONTROL_QW_WRITE;

		flags |= PIPE_CONTROL_CS_STALL;

		if (engine->class == COMPUTE_CLASS)
			flags &= ~PIPE_CONTROL_3D_FLAGS;

		if (!HAS_FLAT_CCS(rq->engine->i915))
			count = 8 + 4;
		else
			count = 8;

		cs = intel_ring_begin(rq, count);
		if (IS_ERR(cs))
			return PTR_ERR(cs);

		/*
		 * Prevent the pre-parser from skipping past the TLB
		 * invalidate and loading a stale page for the batch
		 * buffer / request payload.
		 */
		*cs++ = preparser_disable(true);

		cs = gen8_emit_pipe_control(cs, flags, LRC_PPHWSP_SCRATCH_ADDR);

		if (!HAS_FLAT_CCS(rq->engine->i915)) {
			/* hsdes: 1809175790 */
			cs = gen12_emit_aux_table_inv(cs, GEN12_GFX_CCS_AUX_NV);
		}

		*cs++ = preparser_disable(false);
		intel_ring_advance(rq, cs);
	}

	return 0;
}

int gen12_emit_flush_xcs(struct i915_request *rq, u32 mode)
{
	intel_engine_mask_t aux_inv = 0;
	u32 cmd, *cs;

	cmd = 4;
	if (mode & EMIT_INVALIDATE) {
		cmd += 2;

		if (!HAS_FLAT_CCS(rq->engine->i915) &&
		    (rq->engine->class == VIDEO_DECODE_CLASS ||
		     rq->engine->class == VIDEO_ENHANCEMENT_CLASS)) {
			aux_inv = rq->engine->mask & ~BIT(BCS0);
			if (aux_inv)
				cmd += 4;
		}
	}

	cs = intel_ring_begin(rq, cmd);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	if (mode & EMIT_INVALIDATE)
		*cs++ = preparser_disable(true);

	cmd = MI_FLUSH_DW + 1;

	/*
	 * We always require a command barrier so that subsequent
	 * commands, such as breadcrumb interrupts, are strictly ordered
	 * wrt the contents of the write cache being flushed to memory
	 * (and thus being coherent from the CPU).
	 */
	cmd |= MI_FLUSH_DW_STORE_INDEX | MI_FLUSH_DW_OP_STOREDW;

	if (mode & EMIT_INVALIDATE) {
		cmd |= MI_INVALIDATE_TLB;
		if (rq->engine->class == VIDEO_DECODE_CLASS)
			cmd |= MI_INVALIDATE_BSD;
	}

	*cs++ = cmd;
	*cs++ = LRC_PPHWSP_SCRATCH_ADDR;
	*cs++ = 0; /* upper addr */
	*cs++ = 0; /* value */

	if (aux_inv) { /* hsdes: 1809175790 */
		if (rq->engine->class == VIDEO_DECODE_CLASS)
			cs = gen12_emit_aux_table_inv(cs, GEN12_VD0_AUX_NV);
		else
			cs = gen12_emit_aux_table_inv(cs, GEN12_VE0_AUX_NV);
	}

	if (mode & EMIT_INVALIDATE)
		*cs++ = preparser_disable(false);

	intel_ring_advance(rq, cs);

	return 0;
}

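/*
 * The preemption busy-waits emitted in the fini breadcrumbs below spin on a
 * dedicated dword in the engine's hardware status page; preempt_address()
 * returns its GGTT address.
 */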
static u32 preempt_address(struct intel_engine_cs *engine)
{
	return (i915_ggtt_offset(engine->status_page.vma) +
		I915_GEM_HWS_PREEMPT_ADDR);
}

static u32 hwsp_offset(const struct i915_request *rq)
{
	const struct intel_timeline *tl;

	/* Before the request is executed, the timeline is fixed */
	tl = rcu_dereference_protected(rq->timeline,
				       !i915_request_signaled(rq));

	/* See the comment in i915_request_active_seqno(). */
	return page_mask_bits(tl->hwsp_offset) + offset_in_page(rq->hwsp_seqno);
}

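/*
 * The initial breadcrumb records seqno - 1 in the timeline HWSP and then
 * drops an arbitration point, so a preemption that lands before the user
 * payload starts can be told apart from one that interrupts it (see the
 * comment on i915_request_started() below).
 */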
int gen8_emit_init_breadcrumb(struct i915_request *rq)
{
	u32 *cs;

	GEM_BUG_ON(i915_request_has_initial_breadcrumb(rq));
	if (!i915_request_timeline(rq)->has_initial_breadcrumb)
		return 0;

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_STORE_DWORD_IMM_GEN4 | MI_USE_GGTT;
	*cs++ = hwsp_offset(rq);
	*cs++ = 0;
	*cs++ = rq->fence.seqno - 1;

	/*
	 * Check if we have been preempted before we even get started.
	 *
	 * After this point i915_request_started() reports true, even if
	 * we get preempted and so are no longer running.
	 *
	 * i915_request_started() is used during preemption processing
	 * to decide if the request is currently inside the user payload
	 * or spinning on a kernel semaphore (or earlier). For no-preemption
	 * requests, we do allow preemption on the semaphore before the user
	 * payload, but do not allow preemption once the request is started.
	 *
	 * i915_request_started() is similarly used during GPU hangs to
	 * determine if the user's payload was guilty, and if so, the
	 * request is banned. Before the request is started, it is assumed
	 * to be unharmed and an innocent victim of another's hang.
	 */
	*cs++ = MI_NOOP;
	*cs++ = MI_ARB_CHECK;

	intel_ring_advance(rq, cs);

	/* Record the updated position of the request's payload */
	rq->infix = intel_ring_offset(rq, cs);

	__set_bit(I915_FENCE_FLAG_INITIAL_BREADCRUMB, &rq->fence.flags);

	return 0;
}

static int __gen125_emit_bb_start(struct i915_request *rq,
				  u64 offset, u32 len,
				  const unsigned int flags,
				  u32 arb)
{
	struct intel_context *ce = rq->context;
	u32 wa_offset = lrc_indirect_bb(ce);
	u32 *cs;

	cs = intel_ring_begin(rq, 12);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | arb;

	*cs++ = MI_LOAD_REGISTER_MEM_GEN8 |
		MI_SRM_LRM_GLOBAL_GTT |
		MI_LRI_LRM_CS_MMIO;
	*cs++ = i915_mmio_reg_offset(RING_PREDICATE_RESULT(0));
	*cs++ = wa_offset + DG2_PREDICATE_RESULT_WA;
	*cs++ = 0;

	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);

	/* Fixup stray MI_SET_PREDICATE as it prevents us executing the ring */
	*cs++ = MI_BATCH_BUFFER_START_GEN8;
	*cs++ = wa_offset + DG2_PREDICATE_RESULT_BB;
	*cs++ = 0;

	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;

	intel_ring_advance(rq, cs);

	return 0;
}

int gen125_emit_bb_start_noarb(struct i915_request *rq,
			       u64 offset, u32 len,
			       const unsigned int flags)
{
	return __gen125_emit_bb_start(rq, offset, len, flags, MI_ARB_DISABLE);
}

int gen125_emit_bb_start(struct i915_request *rq,
			 u64 offset, u32 len,
			 const unsigned int flags)
{
	return __gen125_emit_bb_start(rq, offset, len, flags, MI_ARB_ENABLE);
}

int gen8_emit_bb_start_noarb(struct i915_request *rq,
			     u64 offset, u32 len,
			     const unsigned int flags)
{
	u32 *cs;

	cs = intel_ring_begin(rq, 4);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	/*
	 * WaDisableCtxRestoreArbitration:bdw,chv
	 *
	 * We don't need to perform MI_ARB_ENABLE as often as we do (in
	 * particular all the gen that do not need the w/a at all!), if we
	 * took care to make sure that on every switch into this context
	 * (both ordinary and for preemption) that arbitration was enabled
	 * we would be fine. However, for gen8 there is another w/a that
	 * requires us to not preempt inside GPGPU execution, so we keep
	 * arbitration disabled for gen8 batches. Arbitration will be
	 * re-enabled before we close the request
	 * (engine->emit_fini_breadcrumb).
	 */
	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;

	/* FIXME(BDW+): Address space and security selectors. */
	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);

	intel_ring_advance(rq, cs);

	return 0;
}

int gen8_emit_bb_start(struct i915_request *rq,
		       u64 offset, u32 len,
		       const unsigned int flags)
{
	u32 *cs;

	if (unlikely(i915_request_has_nopreempt(rq)))
		return gen8_emit_bb_start_noarb(rq, offset, len, flags);

	cs = intel_ring_begin(rq, 6);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;

	*cs++ = MI_BATCH_BUFFER_START_GEN8 |
		(flags & I915_DISPATCH_SECURE ? 0 : BIT(8));
	*cs++ = lower_32_bits(offset);
	*cs++ = upper_32_bits(offset);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
	*cs++ = MI_NOOP;

	intel_ring_advance(rq, cs);

	return 0;
}

static void assert_request_valid(struct i915_request *rq)
{
	struct intel_ring *ring __maybe_unused = rq->ring;

	/* Can we unwind this request without appearing to go forwards? */
	GEM_BUG_ON(intel_ring_direction(ring, rq->wa_tail, rq->head) <= 0);
}

/*
 * Reserve space for 2 NOOPs at the end of each request to be
 * used as a workaround for not being allowed to do lite
 * restore with HEAD==TAIL (WaIdleLiteRestore).
 */
static u32 *gen8_emit_wa_tail(struct i915_request *rq, u32 *cs)
{
	/* Ensure there's always at least one preemption point per-request. */
	*cs++ = MI_ARB_CHECK;
	*cs++ = MI_NOOP;
	rq->wa_tail = intel_ring_offset(rq, cs);

	/* Check that entire request is less than half the ring */
	assert_request_valid(rq);

	return cs;
}

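/*
 * Under execlists submission (the !GuC case checked in the fini breadcrumb
 * tails), the engine is parked after the breadcrumb on a semaphore wait
 * against the HWS preempt dword, giving the scheduler a cheap point at
 * which to force a context switch.
 */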
static u32 *emit_preempt_busywait(struct i915_request *rq, u32 *cs)
{
	*cs++ = MI_ARB_CHECK; /* trigger IDLE->ACTIVE first */
	*cs++ = MI_SEMAPHORE_WAIT |
		MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_EQ_SDD;
	*cs++ = 0;
	*cs++ = preempt_address(rq->engine);
	*cs++ = 0;
	*cs++ = MI_NOOP;

	return cs;
}

static __always_inline u32*
gen8_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
{
	*cs++ = MI_USER_INTERRUPT;

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	if (intel_engine_has_semaphores(rq->engine) &&
	    !intel_uc_uses_guc_submission(&rq->engine->gt->uc))
		cs = emit_preempt_busywait(rq, cs);

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return gen8_emit_wa_tail(rq, cs);
}

static u32 *emit_xcs_breadcrumb(struct i915_request *rq, u32 *cs)
{
	return gen8_emit_ggtt_write(cs, rq->fence.seqno, hwsp_offset(rq), 0);
}

u32 *gen8_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
{
	return gen8_emit_fini_breadcrumb_tail(rq, emit_xcs_breadcrumb(rq, cs));
}

u32 *gen8_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
{
	cs = gen8_emit_pipe_control(cs,
				    PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
				    PIPE_CONTROL_DEPTH_CACHE_FLUSH |
				    PIPE_CONTROL_DC_FLUSH_ENABLE,
				    0);

	/* XXX flush+write+CS_STALL all in one upsets gem_concurrent_blt:kbl */
	cs = gen8_emit_ggtt_write_rcs(cs,
				      rq->fence.seqno,
				      hwsp_offset(rq),
				      PIPE_CONTROL_FLUSH_ENABLE |
				      PIPE_CONTROL_CS_STALL);

	return gen8_emit_fini_breadcrumb_tail(rq, cs);
}

u32 *gen11_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
{
	cs = gen8_emit_ggtt_write_rcs(cs,
				      rq->fence.seqno,
				      hwsp_offset(rq),
				      PIPE_CONTROL_CS_STALL |
				      PIPE_CONTROL_TILE_CACHE_FLUSH |
				      PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
				      PIPE_CONTROL_DEPTH_CACHE_FLUSH |
				      PIPE_CONTROL_DC_FLUSH_ENABLE |
				      PIPE_CONTROL_FLUSH_ENABLE);

	return gen8_emit_fini_breadcrumb_tail(rq, cs);
}

/*
 * Note that the CS instruction pre-parser will not stall on the breadcrumb
 * flush and will continue pre-fetching the instructions after it before the
 * memory sync is completed. On pre-gen12 HW, the pre-parser will stop at
 * BB_START/END instructions, so, even though we might pre-fetch the pre-amble
 * of the next request before the memory has been flushed, we're guaranteed that
 * we won't access the batch itself too early.
 * However, on gen12+ the parser can pre-fetch across the BB_START/END commands,
 * so, if the current request is modifying an instruction in the next request on
 * the same intel_context, we might pre-fetch and then execute the pre-update
 * instruction. To avoid this, the users of self-modifying code should either
 * disable the parser around the code emitting the memory writes, via a new flag
 * added to MI_ARB_CHECK, or emit the writes from a different intel_context. For
 * the in-kernel use-cases we've opted to use a separate context, see
 * reloc_gpu() as an example.
 * All the above applies only to the instructions themselves. Non-inline data
 * used by the instructions is not pre-fetched.
 */

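/*
 * Purely illustrative (no caller in this file does this): an emitter that
 * wanted to protect self-modifying writes in-line, rather than using a
 * separate context, could bracket them with the pre-parser toggle exactly
 * as gen12_emit_flush_rcs() does around its TLB invalidation:
 *
 *	*cs++ = preparser_disable(true);
 *	... emit the memory writes ...
 *	*cs++ = preparser_disable(false);
 */
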
static u32 *gen12_emit_preempt_busywait(struct i915_request *rq, u32 *cs)
{
	*cs++ = MI_ARB_CHECK; /* trigger IDLE->ACTIVE first */
	*cs++ = MI_SEMAPHORE_WAIT_TOKEN |
		MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_EQ_SDD;
	*cs++ = 0;
	*cs++ = preempt_address(rq->engine);
	*cs++ = 0;
	*cs++ = 0;

	return cs;
}

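/*
 * Wa_14014475959: hold a CCS context at the end of its request. The fini
 * breadcrumb atomically writes 1 into a semaphore dword in the context's
 * PPHWSP and then waits for it to read as 0 again; the clearing write is
 * assumed to come from the scheduler (GuC) once switch-out may proceed.
 */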
/* Wa_14014475959:dg2 */
#define CCS_SEMAPHORE_PPHWSP_OFFSET	0x540
static u32 ccs_semaphore_offset(struct i915_request *rq)
{
	return i915_ggtt_offset(rq->context->state) +
		(LRC_PPHWSP_PN * PAGE_SIZE) + CCS_SEMAPHORE_PPHWSP_OFFSET;
}

/* Wa_14014475959:dg2 */
static u32 *ccs_emit_wa_busywait(struct i915_request *rq, u32 *cs)
{
	int i;

	*cs++ = MI_ATOMIC_INLINE | MI_ATOMIC_GLOBAL_GTT | MI_ATOMIC_CS_STALL |
		MI_ATOMIC_MOVE;
	*cs++ = ccs_semaphore_offset(rq);
	*cs++ = 0;
	*cs++ = 1;

	/*
	 * When MI_ATOMIC_INLINE_DATA set this command must be 11 DW + (1 NOP)
	 * to align. 4 DWs above + 8 filler DWs here.
	 */
	for (i = 0; i < 8; ++i)
		*cs++ = 0;

	*cs++ = MI_SEMAPHORE_WAIT |
		MI_SEMAPHORE_GLOBAL_GTT |
		MI_SEMAPHORE_POLL |
		MI_SEMAPHORE_SAD_EQ_SDD;
	*cs++ = 0;
	*cs++ = ccs_semaphore_offset(rq);
	*cs++ = 0;

	return cs;
}

static __always_inline u32*
gen12_emit_fini_breadcrumb_tail(struct i915_request *rq, u32 *cs)
{
	*cs++ = MI_USER_INTERRUPT;

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	if (intel_engine_has_semaphores(rq->engine) &&
	    !intel_uc_uses_guc_submission(&rq->engine->gt->uc))
		cs = gen12_emit_preempt_busywait(rq, cs);

	/* Wa_14014475959:dg2 */
	if (intel_engine_uses_wa_hold_ccs_switchout(rq->engine))
		cs = ccs_emit_wa_busywait(rq, cs);

	rq->tail = intel_ring_offset(rq, cs);
	assert_ring_tail_valid(rq->ring, rq->tail);

	return gen8_emit_wa_tail(rq, cs);
}

u32 *gen12_emit_fini_breadcrumb_xcs(struct i915_request *rq, u32 *cs)
{
	/* XXX Stalling flush before seqno write; post-sync not */
	cs = emit_xcs_breadcrumb(rq, __gen8_emit_flush_dw(cs, 0, 0, 0));
	return gen12_emit_fini_breadcrumb_tail(rq, cs);
}

u32 *gen12_emit_fini_breadcrumb_rcs(struct i915_request *rq, u32 *cs)
{
	struct drm_i915_private *i915 = rq->engine->i915;
	u32 flags = (PIPE_CONTROL_CS_STALL |
		     PIPE_CONTROL_TILE_CACHE_FLUSH |
		     PIPE_CONTROL_FLUSH_L3 |
		     PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH |
		     PIPE_CONTROL_DEPTH_CACHE_FLUSH |
		     PIPE_CONTROL_DC_FLUSH_ENABLE |
		     PIPE_CONTROL_FLUSH_ENABLE);

	if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
		/* Wa_1409600907 */
		flags |= PIPE_CONTROL_DEPTH_STALL;

	if (rq->engine->class == COMPUTE_CLASS)
		flags &= ~PIPE_CONTROL_3D_FLAGS;

	cs = gen12_emit_ggtt_write_rcs(cs,
				       rq->fence.seqno,
				       hwsp_offset(rq),
				       PIPE_CONTROL0_HDC_PIPELINE_FLUSH,
				       flags);

	return gen12_emit_fini_breadcrumb_tail(rq, cs);
}