// SPDX-License-Identifier: MIT
/*
 * Copyright © 2022 Intel Corporation
 */

#include "xe_ring_ops.h"

#include "regs/xe_gpu_commands.h"
#include "regs/xe_gt_regs.h"
#include "regs/xe_lrc_layout.h"
#include "regs/xe_regs.h"
#include "xe_engine_types.h"
#include "xe_gt.h"
#include "xe_lrc.h"
#include "xe_macros.h"
#include "xe_sched_job.h"
#include "xe_vm_types.h"

/*
 * 3D-related flags that can't be set on _engines_ that lack access to the 3D
 * pipeline (i.e., CCS engines).
 */
#define PIPE_CONTROL_3D_ENGINE_FLAGS (\
		PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH | \
		PIPE_CONTROL_DEPTH_CACHE_FLUSH | \
		PIPE_CONTROL_TILE_CACHE_FLUSH | \
		PIPE_CONTROL_DEPTH_STALL | \
		PIPE_CONTROL_STALL_AT_SCOREBOARD | \
		PIPE_CONTROL_PSD_SYNC | \
		PIPE_CONTROL_AMFS_FLUSH | \
		PIPE_CONTROL_VF_CACHE_INVALIDATE | \
		PIPE_CONTROL_GLOBAL_SNAPSHOT_RESET)

/* 3D-related flags that can't be set on _platforms_ that lack a 3D pipeline */
#define PIPE_CONTROL_3D_ARCH_FLAGS ( \
		PIPE_CONTROL_3D_ENGINE_FLAGS | \
		PIPE_CONTROL_INDIRECT_STATE_DISABLE | \
		PIPE_CONTROL_FLUSH_ENABLE | \
		PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE | \
		PIPE_CONTROL_DC_FLUSH_ENABLE)
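
/*
 * On Gen12+ MI_ARB_CHECK doubles as the command pre-parser control: BIT(8)
 * is the modify mask for bit 0, which carries the new state. A single dword
 * thus turns pre-fetching off (and back on) around sequences the parser
 * must not read ahead of.
 */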
static u32 preparser_disable(bool state)
{
	return MI_ARB_CHECK | BIT(8) | state;
}
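
/*
 * Invalidate an engine's AuxCCS lookup table from the ring: an LRI with
 * MI_LRI_MMIO_REMAP_EN, with the register offset adjusted by the GT's
 * adj_offset so the write lands on this GT's copy of the register.
 */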
static int emit_aux_table_inv(struct xe_gt *gt, struct xe_reg reg,
			      u32 *dw, int i)
{
	dw[i++] = MI_LOAD_REGISTER_IMM(1) | MI_LRI_MMIO_REMAP_EN;
	dw[i++] = reg.addr + gt->mmio.adj_offset;
	dw[i++] = AUX_INV;
	dw[i++] = MI_NOOP;

	return i;
}
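
/*
 * Per-job trailer: raise MI_USER_INTERRUPT for the seqno just written, then
 * re-enable arbitration so the ring can switch contexts between jobs.
 */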
static int emit_user_interrupt(u32 *dw, int i)
{
	dw[i++] = MI_USER_INTERRUPT;
	dw[i++] = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	dw[i++] = MI_ARB_CHECK;

	return i;
}

static int emit_store_imm_ggtt(u32 addr, u32 value, u32 *dw, int i)
{
	dw[i++] = MI_STORE_DATA_IMM | BIT(22) /* GGTT */ | 2;
	dw[i++] = addr;
	dw[i++] = 0;
	dw[i++] = value;

	return i;
}
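
/*
 * MI_FLUSH_DW with a post-sync dword write to a GGTT address, optionally
 * also invalidating the TLBs; the extra length dword (+ 1) makes room for
 * the upper 32 bits of the address.
 */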
static int emit_flush_imm_ggtt(u32 addr, u32 value, bool invalidate_tlb,
			       u32 *dw, int i)
{
	dw[i++] = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW |
		(invalidate_tlb ? MI_INVALIDATE_TLB : 0);
	dw[i++] = addr | MI_FLUSH_DW_USE_GTT;
	dw[i++] = 0;
	dw[i++] = value;

	return i;
}

static int emit_bb_start(u64 batch_addr, u32 ppgtt_flag, u32 *dw, int i)
{
	dw[i++] = MI_BATCH_BUFFER_START | ppgtt_flag;
	dw[i++] = lower_32_bits(batch_addr);
	dw[i++] = upper_32_bits(batch_addr);

	return i;
}
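
/*
 * Like emit_flush_imm_ggtt(), but with MI_FLUSH_DW_STORE_INDEX the write
 * lands at an offset into the context's hardware status page (the PPHWSP
 * scratch slot) rather than an absolute GGTT address.
 */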
static int emit_flush_invalidate(u32 flag, u32 *dw, int i)
{
	dw[i] = MI_FLUSH_DW + 1;
	dw[i] |= flag;
	dw[i++] |= MI_INVALIDATE_TLB | MI_FLUSH_DW_OP_STOREDW |
		MI_FLUSH_DW_STORE_INDEX;

	dw[i++] = LRC_PPHWSP_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
	dw[i++] = 0;
	dw[i++] = ~0U;

	return i;
}
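
/*
 * Render/compute invalidation is done with a PIPE_CONTROL rather than
 * MI_FLUSH_DW. mask_flags strips the 3D-only bits on engines or platforms
 * that lack the 3D pipeline (see PIPE_CONTROL_3D_*_FLAGS above).
 */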
static int emit_pipe_invalidate(u32 mask_flags, bool invalidate_tlb, u32 *dw,
				int i)
{
	u32 flags = PIPE_CONTROL_CS_STALL |
		PIPE_CONTROL_COMMAND_CACHE_INVALIDATE |
		PIPE_CONTROL_INSTRUCTION_CACHE_INVALIDATE |
		PIPE_CONTROL_TEXTURE_CACHE_INVALIDATE |
		PIPE_CONTROL_VF_CACHE_INVALIDATE |
		PIPE_CONTROL_CONST_CACHE_INVALIDATE |
		PIPE_CONTROL_STATE_CACHE_INVALIDATE |
		PIPE_CONTROL_QW_WRITE |
		PIPE_CONTROL_STORE_DATA_INDEX;

	if (invalidate_tlb)
		flags |= PIPE_CONTROL_TLB_INVALIDATE;

	flags &= ~mask_flags;

	dw[i++] = GFX_OP_PIPE_CONTROL(6);
	dw[i++] = flags;
	dw[i++] = LRC_PPHWSP_SCRATCH_ADDR;
	dw[i++] = 0;
	dw[i++] = 0;
	dw[i++] = 0;

	return i;
}
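
/*
 * Qword variant of MI_STORE_DATA_IMM (opcode 0x20, length 3); per the
 * macro's name, (1 << 21) marks the PPGTT store as posted.
 */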
#define MI_STORE_QWORD_IMM_GEN8_POSTED (MI_INSTR(0x20, 3) | (1 << 21))

static int emit_store_imm_ppgtt_posted(u64 addr, u64 value,
				       u32 *dw, int i)
{
	dw[i++] = MI_STORE_QWORD_IMM_GEN8_POSTED;
	dw[i++] = lower_32_bits(addr);
	dw[i++] = upper_32_bits(addr);
	dw[i++] = lower_32_bits(value);
	dw[i++] = upper_32_bits(value);

	return i;
}
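
/*
 * PIPE_CONTROL flavour of the seqno write: a QW_WRITE post-sync op to a
 * GGTT address behind a CS stall, plus a flush when the engine has render
 * caches to flush (stall_only is set when it does not).
 */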
static int emit_pipe_imm_ggtt(u32 addr, u32 value, bool stall_only, u32 *dw,
			      int i)
{
	dw[i++] = GFX_OP_PIPE_CONTROL(6);
	dw[i++] = (stall_only ? PIPE_CONTROL_CS_STALL :
		   PIPE_CONTROL_FLUSH_ENABLE | PIPE_CONTROL_CS_STALL) |
		PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_QW_WRITE;
	dw[i++] = addr;
	dw[i++] = 0;
	dw[i++] = value;
	dw[i++] = 0; /* We're thrashing one extra dword. */

	return i;
}
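
/*
 * Bit 8 of MI_BATCH_BUFFER_START is the address-space indicator (set for
 * PPGTT). Kernel/workaround engines run their batches from the GGTT.
 */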
static u32 get_ppgtt_flag(struct xe_sched_job *job)
{
	return !(job->engine->flags & ENGINE_FLAG_WA) ? BIT(8) : 0;
}
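
/*
 * Per-job ring contents for copy engines: start-seqno write (folded into a
 * TLB-invalidating flush when the VM requires one), batch-buffer start,
 * optional user-fence write, seqno flush and user interrupt.
 */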
static void __emit_job_gen12_copy(struct xe_sched_job *job, struct xe_lrc *lrc,
				  u64 batch_addr, u32 seqno)
{
	u32 dw[MAX_JOB_SIZE_DW], i = 0;
	u32 ppgtt_flag = get_ppgtt_flag(job);
	struct xe_vm *vm = job->engine->vm;

	if (vm->batch_invalidate_tlb) {
		dw[i++] = preparser_disable(true);
		i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
					seqno, true, dw, i);
		dw[i++] = preparser_disable(false);
	} else {
		i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
					seqno, dw, i);
	}

	i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);

	if (job->user_fence.used)
		i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
						job->user_fence.value,
						dw, i);

	i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);

	i = emit_user_interrupt(dw, i);

	XE_BUG_ON(i > MAX_JOB_SIZE_DW);

	xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
}

static bool has_aux_ccs(struct xe_device *xe)
{
	/*
	 * PVC is a special case that has no compression of either type
	 * (FlatCCS or AuxCCS).
	 */
	if (xe->info.platform == XE_PVC)
		return false;

	return !xe->info.has_flat_ccs;
}
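
/*
 * Platforms with AuxCCS (rather than FlatCCS) compression need the engine's
 * AUX table invalidated before the batch runs (hsdes: 1809175790).
 */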
static void __emit_job_gen12_video(struct xe_sched_job *job, struct xe_lrc *lrc,
				   u64 batch_addr, u32 seqno)
{
	u32 dw[MAX_JOB_SIZE_DW], i = 0;
	u32 ppgtt_flag = get_ppgtt_flag(job);
	struct xe_gt *gt = job->engine->gt;
	struct xe_device *xe = gt_to_xe(gt);
	bool decode = job->engine->class == XE_ENGINE_CLASS_VIDEO_DECODE;
	struct xe_vm *vm = job->engine->vm;

	dw[i++] = preparser_disable(true);

	/* hsdes: 1809175790 */
	if (has_aux_ccs(xe)) {
		if (decode)
			i = emit_aux_table_inv(gt, VD0_AUX_INV, dw, i);
		else
			i = emit_aux_table_inv(gt, VE0_AUX_INV, dw, i);
	}

	if (vm->batch_invalidate_tlb)
		i = emit_flush_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
					seqno, true, dw, i);

	dw[i++] = preparser_disable(false);

	if (!vm->batch_invalidate_tlb)
		i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
					seqno, dw, i);

	i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);

	if (job->user_fence.used)
		i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
						job->user_fence.value,
						dw, i);

	i = emit_flush_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, false, dw, i);

	i = emit_user_interrupt(dw, i);

	XE_BUG_ON(i > MAX_JOB_SIZE_DW);

	xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
}

static void __emit_job_gen12_render_compute(struct xe_sched_job *job,
					    struct xe_lrc *lrc,
					    u64 batch_addr, u32 seqno)
{
	u32 dw[MAX_JOB_SIZE_DW], i = 0;
	u32 ppgtt_flag = get_ppgtt_flag(job);
	struct xe_gt *gt = job->engine->gt;
	struct xe_device *xe = gt_to_xe(gt);
	bool lacks_render = !(gt->info.engine_mask & XE_HW_ENGINE_RCS_MASK);
	struct xe_vm *vm = job->engine->vm;
	u32 mask_flags = 0;

	dw[i++] = preparser_disable(true);
	if (lacks_render)
		mask_flags = PIPE_CONTROL_3D_ARCH_FLAGS;
	else if (job->engine->class == XE_ENGINE_CLASS_COMPUTE)
		mask_flags = PIPE_CONTROL_3D_ENGINE_FLAGS;

	/* See __xe_pt_bind_vma() for a discussion on TLB invalidations. */
	i = emit_pipe_invalidate(mask_flags, vm->batch_invalidate_tlb, dw, i);

	/* hsdes: 1809175790 */
	if (has_aux_ccs(xe))
		i = emit_aux_table_inv(gt, CCS_AUX_INV, dw, i);

	dw[i++] = preparser_disable(false);

	i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
				seqno, dw, i);

	i = emit_bb_start(batch_addr, ppgtt_flag, dw, i);

	if (job->user_fence.used)
		i = emit_store_imm_ppgtt_posted(job->user_fence.addr,
						job->user_fence.value,
						dw, i);

	i = emit_pipe_imm_ggtt(xe_lrc_seqno_ggtt_addr(lrc), seqno, lacks_render, dw, i);

	i = emit_user_interrupt(dw, i);

	XE_BUG_ON(i > MAX_JOB_SIZE_DW);

	xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
}

static void emit_migration_job_gen12(struct xe_sched_job *job,
				     struct xe_lrc *lrc, u32 seqno)
{
	u32 dw[MAX_JOB_SIZE_DW], i = 0;

	i = emit_store_imm_ggtt(xe_lrc_start_seqno_ggtt_addr(lrc),
				seqno, dw, i);

	i = emit_bb_start(job->batch_addr[0], BIT(8), dw, i);

	/* XXX: Do we need this? Leaving for now. */
	dw[i++] = preparser_disable(true);
	i = emit_flush_invalidate(0, dw, i);
	dw[i++] = preparser_disable(false);

	i = emit_bb_start(job->batch_addr[1], BIT(8), dw, i);

	dw[i++] = (MI_FLUSH_DW | MI_INVALIDATE_TLB | job->migrate_flush_flags |
		   MI_FLUSH_DW_OP_STOREDW) + 1;
	dw[i++] = xe_lrc_seqno_ggtt_addr(lrc) | MI_FLUSH_DW_USE_GTT;
	dw[i++] = 0;
	dw[i++] = seqno; /* value */

	i = emit_user_interrupt(dw, i);

	XE_BUG_ON(i > MAX_JOB_SIZE_DW);

	xe_lrc_write_ring(lrc, dw, i * sizeof(*dw));
}
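
/*
 * A job carries one batch address per logical ring of the engine
 * (engine->width); each LRC gets the same trailer signalling the same
 * seqno. Migration is special-cased: it chains two batches with a flush
 * and invalidate between them.
 */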
static void emit_job_gen12_copy(struct xe_sched_job *job)
{
	int i;

	if (xe_sched_job_is_migration(job->engine)) {
		emit_migration_job_gen12(job, job->engine->lrc,
					 xe_sched_job_seqno(job));
		return;
	}

	for (i = 0; i < job->engine->width; ++i)
		__emit_job_gen12_copy(job, job->engine->lrc + i,
				      job->batch_addr[i],
				      xe_sched_job_seqno(job));
}

static void emit_job_gen12_video(struct xe_sched_job *job)
{
	int i;

	/* FIXME: Not doing parallel handshake for now */
	for (i = 0; i < job->engine->width; ++i)
		__emit_job_gen12_video(job, job->engine->lrc + i,
				       job->batch_addr[i],
				       xe_sched_job_seqno(job));
}

static void emit_job_gen12_render_compute(struct xe_sched_job *job)
{
	int i;

	for (i = 0; i < job->engine->width; ++i)
		__emit_job_gen12_render_compute(job, job->engine->lrc + i,
						job->batch_addr[i],
						xe_sched_job_seqno(job));
}

static const struct xe_ring_ops ring_ops_gen12_copy = {
	.emit_job = emit_job_gen12_copy,
};

static const struct xe_ring_ops ring_ops_gen12_video = {
	.emit_job = emit_job_gen12_video,
};

static const struct xe_ring_ops ring_ops_gen12_render_compute = {
	.emit_job = emit_job_gen12_render_compute,
};

const struct xe_ring_ops *
xe_ring_ops_get(struct xe_gt *gt, enum xe_engine_class class)
{
	switch (class) {
	case XE_ENGINE_CLASS_COPY:
		return &ring_ops_gen12_copy;
	case XE_ENGINE_CLASS_VIDEO_DECODE:
	case XE_ENGINE_CLASS_VIDEO_ENHANCE:
		return &ring_ops_gen12_video;
	case XE_ENGINE_CLASS_RENDER:
	case XE_ENGINE_CLASS_COMPUTE:
		return &ring_ops_gen12_render_compute;
	default:
		return NULL;
	}
}