1 // SPDX-License-Identifier: MIT
3 * Copyright © 2014-2018 Intel Corporation
8 #include "intel_context.h"
9 #include "intel_engine_pm.h"
10 #include "intel_engine_regs.h"
11 #include "intel_gpu_commands.h"
13 #include "intel_gt_mcr.h"
14 #include "intel_gt_print.h"
15 #include "intel_gt_regs.h"
16 #include "intel_ring.h"
17 #include "intel_workarounds.h"
20 * DOC: Hardware workarounds
22 * Hardware workarounds are register programming, documented to be executed in
23 * the driver, that falls outside of the normal programming sequences for a
24 * platform. There are some basic categories of workarounds, depending on
25 * how/when they are applied:
27 * - Context workarounds: workarounds that touch registers that are
28 * saved/restored to/from the HW context image. The list is emitted (via Load
29 * Register Immediate commands) once when initializing the device and saved in
30 * the default context. That default context is then used on every context
31 * creation to have a "primed golden context", i.e. a context image that
32 * already contains the changes needed to all the registers.
34 * Context workarounds should be implemented in the \*_ctx_workarounds_init()
35 * variants respective to the targeted platforms.
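*
* As a sketch (hypothetical platform hook, not a real workaround), such a
* variant simply records register updates against the list it is handed::
*
*	static void xyz_ctx_workarounds_init(struct intel_engine_cs *engine,
*					     struct i915_wa_list *wal)
*	{
*		wa_masked_en(wal, SOME_CHICKEN_REG, SOME_CHICKEN_BIT);
*	}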
37 * - Engine workarounds: the list of these WAs is applied whenever the specific
38 * engine is reset. It's also possible that a set of engine classes share a
39 * common power domain and they are reset together. This happens on some
40 * platforms with render and compute engines. In this case (at least) one of
41 * them needs to keep the workaround programming: the approach taken in the
42 * driver is to tie those workarounds to the first compute/render engine that
43 * is registered. When executing with GuC submission, engine resets are
44 * outside of kernel driver control, hence the list of registers involved is
45 * written once, on engine initialization, and then passed to GuC, which
46 * saves/restores their values before/after the reset takes place. See
47 * ``drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c`` for reference.
49 * Workarounds for registers specific to RCS and CCS should be implemented in
50 * rcs_engine_wa_init() and ccs_engine_wa_init(), respectively; those for
51 * registers belonging to BCS, VCS or VECS should be implemented in
52 * xcs_engine_wa_init(). Workarounds for registers not belonging to a specific
53 * engine's MMIO range but that are part of the common RCS/CCS reset domain
54 * should be implemented in general_render_compute_wa_init().
56 * - GT workarounds: the list of these WAs is applied whenever these registers
57 * revert to their default values: on GPU reset, suspend/resume [1]_, etc.
59 * GT workarounds should be implemented in the \*_gt_workarounds_init()
60 * variants respective to the targeted platforms.
62 * - Register whitelist: some workarounds need to be implemented in userspace,
63 * but need to touch privileged registers. The whitelist in the kernel
64 * instructs the hardware to allow the access to happen. From the kernel side,
65 * this is just a special case of an MMIO workaround (as we write the list of
66 * these to-be-whitelisted registers to some special HW registers).
68 * Register whitelisting should be done in the \*_whitelist_build() variants
69 * respective to the targeted platforms.
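*
* For instance (again a hypothetical hook)::
*
*	static void xyz_whitelist_build(struct intel_engine_cs *engine)
*	{
*		whitelist_reg(&engine->whitelist, SOME_PRIVILEGED_REG);
*	}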
71 * - Workaround batchbuffers: buffers that get executed automatically by the
72 * hardware on every HW context restore. These buffers are created and
73 * programmed in the default context so the hardware always goes through those
74 * programming sequences when switching contexts. The support for workaround
75 * batchbuffers is enabled by these hardware mechanisms:
77 * #. INDIRECT_CTX: A batchbuffer and an offset are provided in the default
78 * context, pointing the hardware to jump to that location when that offset
79 * is reached in the context restore. The workaround batchbuffer in the driver
80 * currently uses this mechanism for all platforms.
82 * #. BB_PER_CTX_PTR: A batchbuffer is provided in the default context,
83 * pointing the hardware to a buffer to continue executing after the
84 * engine registers are restored in a context restore sequence. This is
85 * currently not used in the driver.
87 * - Other: There are WAs that, due to their nature, cannot be applied from a
88 * central place. Those are peppered around the rest of the code, as needed.
89 * Workarounds related to the display IP are the main example.
91 * .. [1] Technically, some registers are powercontext saved & restored, so they
92 * survive a suspend/resume. In practice, writing them again is not too
93 * costly and simplifies things, so it's the approach taken in the driver.
96 static void wa_init_start(struct i915_wa_list *wal, struct intel_gt *gt,
97 const char *name, const char *engine_name)
101 wal->engine_name = engine_name;
104 #define WA_LIST_CHUNK (1 << 4)
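/*
 * Workaround lists are (re)allocated in chunks of 16 entries, and
 * wa_init_finish() below trims any unused tail once a list is complete.
 */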
106 static void wa_init_finish(struct i915_wa_list *wal)
108 /* Trim unused entries. */
109 if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
110 struct i915_wa *list = kmemdup(wal->list,
111 wal->count * sizeof(*list),
123 gt_dbg(wal->gt, "Initialized %u %s workarounds on %s\n",
124 wal->wa_count, wal->name, wal->engine_name);
127 static enum forcewake_domains
128 wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
130 enum forcewake_domains fw = 0;
134 for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
135 fw |= intel_uncore_forcewake_for_reg(uncore,
143 static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
145 unsigned int addr = i915_mmio_reg_offset(wa->reg);
146 struct drm_i915_private *i915 = wal->gt->i915;
147 unsigned int start = 0, end = wal->count;
148 const unsigned int grow = WA_LIST_CHUNK;
151 GEM_BUG_ON(!is_power_of_2(grow));
153 if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
154 struct i915_wa *list;
156 list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
159 drm_err(&i915->drm, "No space for workaround init!\n");
164 memcpy(list, wal->list, sizeof(*wa) * wal->count);
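/*
 * The list is kept sorted by MMIO offset, so an existing entry for the
 * same register can be found by the binary search below and merged with
 * the new one instead of being added twice.
 */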
171 while (start < end) {
172 unsigned int mid = start + (end - start) / 2;
174 if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
176 } else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
179 wa_ = &wal->list[mid];
181 if ((wa->clr | wa_->clr) && !(wa->clr & ~wa_->clr)) {
183 "Discarding overwritten w/a for reg %04x (clear: %08x, set: %08x)\n",
184 i915_mmio_reg_offset(wa_->reg),
187 wa_->set &= ~wa->clr;
193 wa_->read |= wa->read;
199 wa_ = &wal->list[wal->count++];
202 while (wa_-- > wal->list) {
203 GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
204 i915_mmio_reg_offset(wa_[1].reg));
205 if (i915_mmio_reg_offset(wa_[1].reg) >
206 i915_mmio_reg_offset(wa_[0].reg))
209 swap(wa_[1], wa_[0]);
213 static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
214 u32 clear, u32 set, u32 read_mask, bool masked_reg)
216 struct i915_wa wa = {
221 .masked_reg = masked_reg,
227 static void wa_mcr_add(struct i915_wa_list *wal, i915_mcr_reg_t reg,
228 u32 clear, u32 set, u32 read_mask, bool masked_reg)
230 struct i915_wa wa = {
235 .masked_reg = masked_reg,
243 wa_write_clr_set(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
245 wa_add(wal, reg, clear, set, clear | set, false);
249 wa_mcr_write_clr_set(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clear, u32 set)
251 wa_mcr_add(wal, reg, clear, set, clear | set, false);
255 wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
257 wa_write_clr_set(wal, reg, ~0, set);
261 wa_mcr_write(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 set)
263 wa_mcr_write_clr_set(wal, reg, ~0, set);
267 wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
269 wa_write_clr_set(wal, reg, set, set);
273 wa_mcr_write_or(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 set)
275 wa_mcr_write_clr_set(wal, reg, set, set);
279 wa_write_clr(struct i915_wa_list *wal, i915_reg_t reg, u32 clr)
281 wa_write_clr_set(wal, reg, clr, 0);
285 wa_mcr_write_clr(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clr)
287 wa_mcr_write_clr_set(wal, reg, clr, 0);
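/*
 * Quick reference (illustrative): wa_write(wal, reg, val) replaces the whole
 * register (clear mask ~0); wa_write_or(wal, reg, BIT(3)) does an rmw that
 * only guarantees BIT(3) ends up set; wa_write_clr(wal, reg, BIT(3))
 * guarantees it ends up cleared. The latter two leave the other bits as
 * they were.
 */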
291 * WA operations on "masked register". A masked register has the upper 16 bits
292 * documented as "masked" in b-spec. Its purpose is to allow writing to just a
293 * portion of the register without an rmw: you simply write in the upper 16 bits
294 * the mask of bits you are going to modify.
296 * The wa_masked_* family of functions already does the necessary operations to
297 * calculate the mask based on the parameters passed, so the user only has to
298 * provide the lower 16 bits of that register.
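*
* For example (illustrative): wa_masked_en(wal, reg, BIT(5)) records a set
* value of _MASKED_BIT_ENABLE(BIT(5)) == (BIT(5) << 16) | BIT(5) ==
* 0x00200020, i.e. "bit 5 is being written" in the upper half and "write it
* as 1" in the lower half; wa_masked_dis() records only BIT(5) << 16,
* writing the bit as 0.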
302 wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
304 wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
308 wa_mcr_masked_en(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
310 wa_mcr_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
314 wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
316 wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
320 wa_mcr_masked_dis(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
322 wa_mcr_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
326 wa_masked_field_set(struct i915_wa_list *wal, i915_reg_t reg,
329 wa_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
333 wa_mcr_masked_field_set(struct i915_wa_list *wal, i915_mcr_reg_t reg,
336 wa_mcr_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
339 static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
340 struct i915_wa_list *wal)
342 wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
345 static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
346 struct i915_wa_list *wal)
348 wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
351 static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
352 struct i915_wa_list *wal)
354 wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
356 /* WaDisableAsyncFlipPerfMode:bdw,chv */
357 wa_masked_en(wal, RING_MI_MODE(RENDER_RING_BASE), ASYNC_FLIP_PERF_DISABLE);
359 /* WaDisablePartialInstShootdown:bdw,chv */
360 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
361 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
363 /* Use Force Non-Coherent whenever executing a 3D context. This is a
364 * workaround for a possible hang in the unlikely event a TLB
365 * invalidation occurs during a PSD flush.
367 /* WaForceEnableNonCoherent:bdw,chv */
368 /* WaHdcDisableFetchWhenMasked:bdw,chv */
369 wa_masked_en(wal, HDC_CHICKEN0,
370 HDC_DONOT_FETCH_MEM_WHEN_MASKED |
371 HDC_FORCE_NON_COHERENT);
373 /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
374 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
375 * polygons in the same 8x4 pixel/sample area to be processed without
376 * stalling waiting for the earlier ones to write to Hierarchical Z
379 * This optimization is off by default for BDW and CHV; turn it on.
381 wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
383 /* Wa4x4STCOptimizationDisable:bdw,chv */
384 wa_masked_en(wal, CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
387 * BSpec recommends 8x4 when MSAA is used,
388 * however in practice 16x4 seems fastest.
390 * Note that PS/WM thread counts depend on the WIZ hashing
391 * disable bit, which we don't touch here, but it's good
392 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
394 wa_masked_field_set(wal, GEN7_GT_MODE,
395 GEN6_WIZ_HASHING_MASK,
396 GEN6_WIZ_HASHING_16x4);
399 static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
400 struct i915_wa_list *wal)
402 struct drm_i915_private *i915 = engine->i915;
404 gen8_ctx_workarounds_init(engine, wal);
406 /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
407 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
409 /* WaDisableDopClockGating:bdw
411 * Also see the related UCGTCL1 write in bdw_init_clock_gating()
412 * to disable EUTC clock gating.
414 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
415 DOP_CLOCK_GATING_DISABLE);
417 wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN3,
418 GEN8_SAMPLER_POWER_BYPASS_DIS);
420 wa_masked_en(wal, HDC_CHICKEN0,
421 /* WaForceContextSaveRestoreNonCoherent:bdw */
422 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
423 /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
424 (IS_BROADWELL_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
427 static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
428 struct i915_wa_list *wal)
430 gen8_ctx_workarounds_init(engine, wal);
432 /* WaDisableThreadStallDopClockGating:chv */
433 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
435 /* Improve HiZ throughput on CHV. */
436 wa_masked_en(wal, HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
439 static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
440 struct i915_wa_list *wal)
442 struct drm_i915_private *i915 = engine->i915;
445 /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
447 * Must match Display Engine. See
448 * WaCompressedResourceDisplayNewHashMode.
450 wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
451 GEN9_PBE_COMPRESSED_HASH_SELECTION);
452 wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
453 GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
456 /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
457 /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
458 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
459 FLOW_CONTROL_ENABLE |
460 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
462 /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
463 /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
464 wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
465 GEN9_ENABLE_YV12_BUGFIX |
466 GEN9_ENABLE_GPGPU_PREEMPTION);
468 /* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
469 /* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
470 wa_masked_en(wal, CACHE_MODE_1,
471 GEN8_4x4_STC_OPTIMIZATION_DISABLE |
472 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
474 /* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
475 wa_mcr_masked_dis(wal, GEN9_HALF_SLICE_CHICKEN5,
476 GEN9_CCS_TLB_PREFETCH_ENABLE);
478 /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
479 wa_masked_en(wal, HDC_CHICKEN0,
480 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
481 HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
483 /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
484 * both tied to WaForceContextSaveRestoreNonCoherent
485 * in some hsds for skl. We keep the tie for all gen9. The
486 * documentation is a bit hazy and so we want to get common behaviour,
487 * even though there is no clear evidence we would need both on kbl/bxt.
488 * This area has been a source of system hangs so we play it safe
489 * and mimic skl regardless of what bspec says.
491 * Use Force Non-Coherent whenever executing a 3D context. This
492 * is a workaround for a possible hang in the unlikely event
493 * a TLB invalidation occurs during a PSD flush.
496 /* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
497 wa_masked_en(wal, HDC_CHICKEN0,
498 HDC_FORCE_NON_COHERENT);
500 /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
501 if (IS_SKYLAKE(i915) ||
503 IS_COFFEELAKE(i915) ||
505 wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN3,
506 GEN8_SAMPLER_POWER_BYPASS_DIS);
508 /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
509 wa_mcr_masked_en(wal, HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
512 * Supporting preemption with fine-granularity requires changes in the
513 * batch buffer programming. Since we can't break old userspace, we
514 * need to set our default preemption level to a safe value. Userspace is
515 * still able to use more fine-grained preemption levels, since in
516 * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
517 * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
518 * not real HW workarounds, but merely a way to start using preemption
519 * while maintaining old contract with userspace.
522 /* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
523 wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
525 /* WaDisableGPGPUMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
526 wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
527 GEN9_PREEMPT_GPGPU_LEVEL_MASK,
528 GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
530 /* WaClearHIZ_WM_CHICKEN3:bxt,glk */
531 if (IS_GEN9_LP(i915))
532 wa_masked_en(wal, GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
535 static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
536 struct i915_wa_list *wal)
538 struct intel_gt *gt = engine->gt;
539 u8 vals[3] = { 0, 0, 0 };
542 for (i = 0; i < 3; i++) {
546 * Only consider slices where one, and only one, subslice has 7
549 if (!is_power_of_2(gt->info.sseu.subslice_7eu[i]))
553 * subslice_7eu[i] != 0 (because of the check above) and
554 * ss_max == 4 (maximum number of subslices possible per slice)
558 ss = ffs(gt->info.sseu.subslice_7eu[i]) - 1;
562 if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
565 /* Tune IZ hashing. See intel_device_info_runtime_init() */
566 wa_masked_field_set(wal, GEN7_GT_MODE,
567 GEN9_IZ_HASHING_MASK(2) |
568 GEN9_IZ_HASHING_MASK(1) |
569 GEN9_IZ_HASHING_MASK(0),
570 GEN9_IZ_HASHING(2, vals[2]) |
571 GEN9_IZ_HASHING(1, vals[1]) |
572 GEN9_IZ_HASHING(0, vals[0]));
575 static void skl_ctx_workarounds_init(struct intel_engine_cs *engine,
576 struct i915_wa_list *wal)
578 gen9_ctx_workarounds_init(engine, wal);
579 skl_tune_iz_hashing(engine, wal);
582 static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
583 struct i915_wa_list *wal)
585 gen9_ctx_workarounds_init(engine, wal);
587 /* WaDisableThreadStallDopClockGating:bxt */
588 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
589 STALL_DOP_GATING_DISABLE);
591 /* WaToEnableHwFixForPushConstHWBug:bxt */
592 wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
593 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
596 static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
597 struct i915_wa_list *wal)
599 struct drm_i915_private *i915 = engine->i915;
601 gen9_ctx_workarounds_init(engine, wal);
603 /* WaToEnableHwFixForPushConstHWBug:kbl */
604 if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, STEP_C0, STEP_FOREVER))
605 wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
606 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
608 /* WaDisableSbeCacheDispatchPortSharing:kbl */
609 wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
610 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
613 static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
614 struct i915_wa_list *wal)
616 gen9_ctx_workarounds_init(engine, wal);
618 /* WaToEnableHwFixForPushConstHWBug:glk */
619 wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
620 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
623 static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
624 struct i915_wa_list *wal)
626 gen9_ctx_workarounds_init(engine, wal);
628 /* WaToEnableHwFixForPushConstHWBug:cfl */
629 wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
630 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
632 /* WaDisableSbeCacheDispatchPortSharing:cfl */
633 wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
634 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
637 static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
638 struct i915_wa_list *wal)
640 /* Wa_1406697149 (WaDisableBankHangMode:icl) */
641 wa_write(wal, GEN8_L3CNTLREG, GEN8_ERRDETBCTRL);
643 /* WaForceEnableNonCoherent:icl
644 * This is not the same workaround as in early Gen9 platforms, where
645 * lacking this could cause system hangs, but coherency performance
646 * overhead is high and only a few compute workloads really need it
647 * (the register is whitelisted in hardware now, so UMDs can opt in
648 * for coherency if they have a good reason).
650 wa_mcr_masked_en(wal, ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);
652 /* WaEnableFloatBlendOptimization:icl */
653 wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
654 _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE),
655 0 /* write-only, so skip validation */,
658 /* WaDisableGPGPUMidThreadPreemption:icl */
659 wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
660 GEN9_PREEMPT_GPGPU_LEVEL_MASK,
661 GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
663 /* allow headerless messages for preemptible GPGPU context */
664 wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
665 GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
667 /* Wa_1604278689:icl,ehl */
668 wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
669 wa_write_clr_set(wal, IVB_FBC_RT_BASE_UPPER,
673 /* Wa_1406306137:icl,ehl */
674 wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
678 * These settings aren't actually workarounds, but general tuning settings that
679 * need to be programmed on the dg2 platform.
681 static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine,
682 struct i915_wa_list *wal)
684 wa_mcr_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
685 wa_mcr_write_clr_set(wal, XEHP_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
686 REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f));
687 wa_mcr_write_clr_set(wal, XEHP_FF_MODE2, FF_MODE2_TDS_TIMER_MASK,
688 FF_MODE2_TDS_TIMER_128);
691 static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
692 struct i915_wa_list *wal)
694 struct drm_i915_private *i915 = engine->i915;
697 * Wa_1409142259:tgl,dg1,adl-p
698 * Wa_1409347922:tgl,dg1,adl-p
699 * Wa_1409252684:tgl,dg1,adl-p
700 * Wa_1409217633:tgl,dg1,adl-p
701 * Wa_1409207793:tgl,dg1,adl-p
702 * Wa_1409178076:tgl,dg1,adl-p
703 * Wa_1408979724:tgl,dg1,adl-p
704 * Wa_14010443199:tgl,rkl,dg1,adl-p
705 * Wa_14010698770:tgl,rkl,dg1,adl-s,adl-p
706 * Wa_1409342910:tgl,rkl,dg1,adl-s,adl-p
708 wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
709 GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
711 /* WaDisableGPGPUMidThreadPreemption:gen12 */
712 wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
713 GEN9_PREEMPT_GPGPU_LEVEL_MASK,
714 GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
717 * Wa_16011163337 - GS_TIMER
719 * TDS_TIMER: Although some platforms refer to it as Wa_1604555607, we
720 * need to program it even on those that don't explicitly list that
723 * Note that the programming of GEN12_FF_MODE2 is further modified
724 * according to the FF_MODE2 guidance given by Wa_1608008084.
725 * Wa_1608008084 tells us the FF_MODE2 register will return the wrong
726 * value when read from the CPU.
728 * The default value for this register is zero for all fields.
729 * So instead of doing an RMW we should just write the desired values
730 * for TDS and GS timers. Note that since the readback can't be trusted,
731 * the clear mask is just set to ~0 to make sure other bits are not
732 * inadvertently set. For the same reason read verification is ignored.
737 FF_MODE2_TDS_TIMER_128 | FF_MODE2_GS_TIMER_224,
742 wa_masked_en(wal, HIZ_CHICKEN, HZ_DEPTH_TEST_LE_GE_OPT_DISABLE);
745 wa_masked_en(wal, COMMON_SLICE_CHICKEN4, DISABLE_TDC_LOAD_BALANCING_CALC);
749 static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
750 struct i915_wa_list *wal)
752 gen12_ctx_workarounds_init(engine, wal);
755 wa_masked_dis(wal, GEN11_COMMON_SLICE_CHICKEN3,
756 DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN);
759 wa_masked_en(wal, HIZ_CHICKEN,
760 DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
763 static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
764 struct i915_wa_list *wal)
766 dg2_ctx_gt_tuning_init(engine, wal);
768 /* Wa_16013271637:dg2 */
769 wa_mcr_masked_en(wal, XEHP_SLICE_COMMON_ECO_CHICKEN1,
770 MSC_MSAA_REODER_BUF_BYPASS_DISABLE);
772 /* Wa_14014947963:dg2 */
773 wa_masked_field_set(wal, VF_PREEMPTION, PREEMPTION_VERTEX_COUNT, 0x4000);
775 /* Wa_18018764978:dg2 */
776 wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);
778 /* Wa_18019271663:dg2 */
779 wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
782 static void xelpg_ctx_gt_tuning_init(struct intel_engine_cs *engine,
783 struct i915_wa_list *wal)
785 struct intel_gt *gt = engine->gt;
787 dg2_ctx_gt_tuning_init(engine, wal);
789 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) ||
790 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER))
791 wa_add(wal, DRAW_WATERMARK, VERT_WM_VAL, 0x3FF, 0, false);
794 static void xelpg_ctx_workarounds_init(struct intel_engine_cs *engine,
795 struct i915_wa_list *wal)
797 struct intel_gt *gt = engine->gt;
799 xelpg_ctx_gt_tuning_init(engine, wal);
801 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
802 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
804 wa_masked_field_set(wal, VF_PREEMPTION,
805 PREEMPTION_VERTEX_COUNT, 0x4000);
808 wa_mcr_masked_en(wal, XEHP_SLICE_COMMON_ECO_CHICKEN1,
809 MSC_MSAA_REODER_BUF_BYPASS_DISABLE);
812 wa_mcr_masked_en(wal, VFLSKPD, VF_PREFETCH_TLB_DIS);
815 wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);
819 wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
822 static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
823 struct i915_wa_list *wal)
826 * This is a "fake" workaround defined by software to ensure we
827 * maintain reliable, backward-compatible behavior for userspace with
828 * regards to how nested MI_BATCH_BUFFER_START commands are handled.
830 * The per-context setting of MI_MODE[12] determines whether the bits
831 * of a nested MI_BATCH_BUFFER_START instruction should be interpreted
832 * in the traditional manner or whether they should instead use a new
833 * tgl+ meaning that breaks backward compatibility, but allows nesting
834 * into 3rd-level batchbuffers. When this new capability was first
835 * added in TGL, it remained off by default unless a context
836 * intentionally opted in to the new behavior. However Xe_HPG now
837 * flips this on by default and requires that we explicitly opt out if
838 * we don't want the new behavior.
840 * From a SW perspective, we want to maintain the backward-compatible
841 * behavior for userspace, so we'll apply a fake workaround to set it
842 * back to the legacy behavior on platforms where the hardware default
843 * is to break compatibility. At the moment there is no Linux
844 * userspace that utilizes third-level batchbuffers, so this will avoid
845 * userspace needing to make any changes; using the legacy
846 * meaning is the correct thing to do. If/when we have userspace
847 * consumers that want to utilize third-level batch nesting, we can
848 * provide a context parameter to allow them to opt-in.
850 wa_masked_dis(wal, RING_MI_MODE(engine->mmio_base), TGL_NESTED_BB_EN);
853 static void gen12_ctx_gt_mocs_init(struct intel_engine_cs *engine,
854 struct i915_wa_list *wal)
859 * Some blitter commands do not have a field for MOCS; those
860 * commands will use the MOCS index pointed to by BLIT_CCTL.
861 * BLIT_CCTL registers need to be programmed as un-cached.
863 if (engine->class == COPY_ENGINE_CLASS) {
864 mocs = engine->gt->mocs.uc_index;
865 wa_write_clr_set(wal,
866 BLIT_CCTL(engine->mmio_base),
868 BLIT_CCTL_MOCS(mocs, mocs));
873 * gen12_ctx_gt_fake_wa_init() doesn't program an official workaround
874 * defined by the hardware team, but rather general context registers.
875 * Routing that context register programming through the context workaround
876 * list allows us to use the wa framework for proper application and validation.
879 gen12_ctx_gt_fake_wa_init(struct intel_engine_cs *engine,
880 struct i915_wa_list *wal)
882 if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
883 fakewa_disable_nestedbb_mode(engine, wal);
885 gen12_ctx_gt_mocs_init(engine, wal);
889 __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
890 struct i915_wa_list *wal,
893 struct drm_i915_private *i915 = engine->i915;
895 wa_init_start(wal, engine->gt, name, engine->name);
897 /* Applies to all engines */
899 * Fake workarounds are not actual workarounds but
900 * programming of context registers using the workaround framework.
902 if (GRAPHICS_VER(i915) >= 12)
903 gen12_ctx_gt_fake_wa_init(engine, wal);
905 if (engine->class != RENDER_CLASS)
908 if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71)))
909 xelpg_ctx_workarounds_init(engine, wal);
910 else if (IS_PONTEVECCHIO(i915))
911 ; /* noop; none at this time */
912 else if (IS_DG2(i915))
913 dg2_ctx_workarounds_init(engine, wal);
914 else if (IS_XEHPSDV(i915))
915 ; /* noop; none at this time */
916 else if (IS_DG1(i915))
917 dg1_ctx_workarounds_init(engine, wal);
918 else if (GRAPHICS_VER(i915) == 12)
919 gen12_ctx_workarounds_init(engine, wal);
920 else if (GRAPHICS_VER(i915) == 11)
921 icl_ctx_workarounds_init(engine, wal);
922 else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
923 cfl_ctx_workarounds_init(engine, wal);
924 else if (IS_GEMINILAKE(i915))
925 glk_ctx_workarounds_init(engine, wal);
926 else if (IS_KABYLAKE(i915))
927 kbl_ctx_workarounds_init(engine, wal);
928 else if (IS_BROXTON(i915))
929 bxt_ctx_workarounds_init(engine, wal);
930 else if (IS_SKYLAKE(i915))
931 skl_ctx_workarounds_init(engine, wal);
932 else if (IS_CHERRYVIEW(i915))
933 chv_ctx_workarounds_init(engine, wal);
934 else if (IS_BROADWELL(i915))
935 bdw_ctx_workarounds_init(engine, wal);
936 else if (GRAPHICS_VER(i915) == 7)
937 gen7_ctx_workarounds_init(engine, wal);
938 else if (GRAPHICS_VER(i915) == 6)
939 gen6_ctx_workarounds_init(engine, wal);
940 else if (GRAPHICS_VER(i915) < 8)
943 MISSING_CASE(GRAPHICS_VER(i915));
949 void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
951 __intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context");
954 int intel_engine_emit_ctx_wa(struct i915_request *rq)
956 struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
957 struct intel_uncore *uncore = rq->engine->uncore;
958 enum forcewake_domains fw;
968 ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
972 cs = intel_ring_begin(rq, (wal->count * 2 + 2));
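/*
 * Dword budget: one for the MI_LOAD_REGISTER_IMM header, two per register
 * (offset + value), plus one of trailing padding.
 */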
976 fw = wal_get_fw_for_rmw(uncore, wal);
978 intel_gt_mcr_lock(wal->gt, &flags);
979 spin_lock(&uncore->lock);
980 intel_uncore_forcewake_get__locked(uncore, fw);
982 *cs++ = MI_LOAD_REGISTER_IMM(wal->count);
983 for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
986 /* Skip reading the register if it's not really needed */
987 if (wa->masked_reg || (wa->clr | wa->set) == U32_MAX) {
991 intel_gt_mcr_read_any_fw(wal->gt, wa->mcr_reg) :
992 intel_uncore_read_fw(uncore, wa->reg);
997 *cs++ = i915_mmio_reg_offset(wa->reg);
1002 intel_uncore_forcewake_put__locked(uncore, fw);
1003 spin_unlock(&uncore->lock);
1004 intel_gt_mcr_unlock(wal->gt, flags);
1006 intel_ring_advance(rq, cs);
1008 ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
1016 gen4_gt_workarounds_init(struct intel_gt *gt,
1017 struct i915_wa_list *wal)
1019 /* WaDisable_RenderCache_OperationalFlush:gen4,ilk */
1020 wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
1024 g4x_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1026 gen4_gt_workarounds_init(gt, wal);
1028 /* WaDisableRenderCachePipelinedFlush:g4x,ilk */
1029 wa_masked_en(wal, CACHE_MODE_0, CM0_PIPELINED_RENDER_FLUSH_DISABLE);
1033 ilk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1035 g4x_gt_workarounds_init(gt, wal);
1037 wa_masked_en(wal, _3D_CHICKEN2, _3D_CHICKEN2_WM_READ_PIPELINED);
1041 snb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1046 ivb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1048 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
1050 GEN7_COMMON_SLICE_CHICKEN1,
1051 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
1053 /* WaApplyL3ControlAndL3ChickenMode:ivb */
1054 wa_write(wal, GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
1055 wa_write(wal, GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
1057 /* WaForceL3Serialization:ivb */
1058 wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
1062 vlv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1064 /* WaForceL3Serialization:vlv */
1065 wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
1068 * WaIncreaseL3CreditsForVLVB0:vlv
1069 * This is the hardware default actually.
1071 wa_write(wal, GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
1075 hsw_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1077 /* L3 caching of data atomics doesn't work -- disable it. */
1078 wa_write(wal, HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
1081 HSW_ROW_CHICKEN3, 0,
1082 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
1083 0 /* XXX does this reg exist? */, true);
1085 /* WaVSRefCountFullforceMissDisable:hsw */
1086 wa_write_clr(wal, GEN7_FF_THREAD_MODE, GEN7_FF_VS_REF_CNT_FFME);
1090 gen9_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
1092 const struct sseu_dev_info *sseu = &to_gt(i915)->info.sseu;
1093 unsigned int slice, subslice;
1096 GEM_BUG_ON(GRAPHICS_VER(i915) != 9);
1099 * WaProgramMgsrForCorrectSliceSpecificMmioReads:gen9,glk,kbl,cml
1100 * Before any MMIO read into slice/subslice specific registers, MCR
1101 * packet control register needs to be programmed to point to any
1102 * enabled s/ss pair. Otherwise, incorrect values will be returned.
1103 * This means each subsequent MMIO read will be forwarded to a
1104 * specific s/ss combination, but this is OK since these registers
1105 * are consistent across s/ss in almost all cases. On the rare
1106 * occasions, such as INSTDONE, where this value is dependent
1107 * on s/ss combo, the read should be done with read_subslice_reg.
1109 slice = ffs(sseu->slice_mask) - 1;
1110 GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask.hsw));
1111 subslice = ffs(intel_sseu_get_hsw_subslices(sseu, slice));
1112 GEM_BUG_ON(!subslice);
1116 * We use GEN8_MCR..() macros to calculate the |mcr| value for
1117 * Gen9 to address WaProgramMgsrForCorrectSliceSpecificMmioReads
1119 mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
1120 mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
1122 drm_dbg(&i915->drm, "MCR slice:%d/subslice:%d = %x\n", slice, subslice, mcr);
1124 wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
1128 gen9_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1130 struct drm_i915_private *i915 = gt->i915;
1132 /* WaProgramMgsrForCorrectSliceSpecificMmioReads:glk,kbl,cml,gen9 */
1133 gen9_wa_init_mcr(i915, wal);
1135 /* WaDisableKillLogic:bxt,skl,kbl */
1136 if (!IS_COFFEELAKE(i915) && !IS_COMETLAKE(i915))
1141 if (HAS_LLC(i915)) {
1142 /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
1144 * Must match Display Engine. See
1145 * WaCompressedResourceDisplayNewHashMode.
1149 MMCD_PCLA | MMCD_HOTSPOT_EN);
1152 /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
1155 BDW_DISABLE_HDC_INVALIDATION);
1159 skl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1161 gen9_gt_workarounds_init(gt, wal);
1163 /* WaDisableGafsUnitClkGating:skl */
1166 GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1168 /* WaInPlaceDecompressionHang:skl */
1169 if (IS_SKYLAKE(gt->i915) && IS_GRAPHICS_STEP(gt->i915, STEP_A0, STEP_H0))
1171 GEN9_GAMT_ECO_REG_RW_IA,
1172 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1176 kbl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1178 gen9_gt_workarounds_init(gt, wal);
1180 /* WaDisableDynamicCreditSharing:kbl */
1181 if (IS_KABYLAKE(gt->i915) && IS_GRAPHICS_STEP(gt->i915, 0, STEP_C0))
1184 GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
1186 /* WaDisableGafsUnitClkGating:kbl */
1189 GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1191 /* WaInPlaceDecompressionHang:kbl */
1193 GEN9_GAMT_ECO_REG_RW_IA,
1194 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1198 glk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1200 gen9_gt_workarounds_init(gt, wal);
1204 cfl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1206 gen9_gt_workarounds_init(gt, wal);
1208 /* WaDisableGafsUnitClkGating:cfl */
1211 GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1213 /* WaInPlaceDecompressionHang:cfl */
1215 GEN9_GAMT_ECO_REG_RW_IA,
1216 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1219 static void __set_mcr_steering(struct i915_wa_list *wal,
1220 i915_reg_t steering_reg,
1221 unsigned int slice, unsigned int subslice)
1225 mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
1226 mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
1228 wa_write_clr_set(wal, steering_reg, mcr_mask, mcr);
1231 static void debug_dump_steering(struct intel_gt *gt)
1233 struct drm_printer p = drm_debug_printer("MCR Steering:");
1235 if (drm_debug_enabled(DRM_UT_DRIVER))
1236 intel_gt_mcr_report_steering(&p, gt, false);
1239 static void __add_mcr_wa(struct intel_gt *gt, struct i915_wa_list *wal,
1240 unsigned int slice, unsigned int subslice)
1242 __set_mcr_steering(wal, GEN8_MCR_SELECTOR, slice, subslice);
1244 gt->default_steering.groupid = slice;
1245 gt->default_steering.instanceid = subslice;
1247 debug_dump_steering(gt);
1251 icl_wa_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
1253 const struct sseu_dev_info *sseu = >->info.sseu;
1254 unsigned int subslice;
1256 GEM_BUG_ON(GRAPHICS_VER(gt->i915) < 11);
1257 GEM_BUG_ON(hweight8(sseu->slice_mask) > 1);
1260 * Although a platform may have subslices, we need to always steer
1261 * reads to the lowest instance that isn't fused off. When Render
1262 * Power Gating is enabled, grabbing forcewake will only power up a
1263 * single subslice (the "minconfig") if there isn't a real workload
1264 * that needs to be run; this means that if we steer register reads to
1265 * one of the higher subslices, we run the risk of reading back 0's or
1268 subslice = __ffs(intel_sseu_get_hsw_subslices(sseu, 0));
1271 * If the subslice we picked above also steers us to a valid L3 bank,
1272 * then we can just rely on the default steering and won't need to
1273 * worry about explicitly re-steering L3BANK reads later.
1275 if (gt->info.l3bank_mask & BIT(subslice))
1276 gt->steering_table[L3BANK] = NULL;
1278 __add_mcr_wa(gt, wal, 0, subslice);
1282 xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
1284 const struct sseu_dev_info *sseu = >->info.sseu;
1285 unsigned long slice, subslice = 0, slice_mask = 0;
1290 * On Xe_HP the steering increases in complexity. There are now several
1291 * more units that require steering and we're not guaranteed to be able
1292 * to find a common setting for all of them. These are:
1293 * - GSLICE (fusable)
1294 * - DSS (sub-unit within gslice; fusable)
1295 * - L3 Bank (fusable)
1296 * - MSLICE (fusable)
1297 * - LNCF (sub-unit within mslice; always present if mslice is present)
1299 * We'll do our default/implicit steering based on GSLICE (in the
1300 * sliceid field) and DSS (in the subsliceid field). If we can
1301 * find overlap between the valid MSLICE and/or LNCF values with
1302 * a suitable GSLICE, then we can just re-use the default value and
1303 * skip any explicit steering at runtime.
1305 * We only need to look for overlap between GSLICE/MSLICE/LNCF to find
1306 * a valid sliceid value. DSS steering is the only type of steering
1307 * that utilizes the 'subsliceid' bits.
1309 * Also note that, even though the steering domain is called "GSlice"
1310 * and it is encoded in the register using the gslice format, the spec
1311 * says that the combined (geometry | compute) fuse should be used to
1312 * select the steering.
1315 /* Find the potential gslice candidates */
1316 slice_mask = intel_slicemask_from_xehp_dssmask(sseu->subslice_mask,
1317 GEN_DSS_PER_GSLICE);
1320 * Find the potential LNCF candidates. Either LNCF within a valid
1323 for_each_set_bit(i, >->info.mslice_mask, GEN12_MAX_MSLICES)
1324 lncf_mask |= (0x3 << (i * 2));
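/*
 * E.g., two present mslices (mslice_mask == 0x3) yield lncf_mask == 0xf,
 * since each mslice contributes two LNCF instances.
 */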
1327 * Are there any sliceid values that work for both GSLICE and LNCF
1330 if (slice_mask & lncf_mask) {
1331 slice_mask &= lncf_mask;
1332 gt->steering_table[LNCF] = NULL;
1335 /* How about sliceid values that also work for MSLICE steering? */
1336 if (slice_mask & gt->info.mslice_mask) {
1337 slice_mask &= gt->info.mslice_mask;
1338 gt->steering_table[MSLICE] = NULL;
1341 if (IS_XEHPSDV(gt->i915) && slice_mask & BIT(0))
1342 gt->steering_table[GAM] = NULL;
1344 slice = __ffs(slice_mask);
1345 subslice = intel_sseu_find_first_xehp_dss(sseu, GEN_DSS_PER_GSLICE, slice) %
1348 __add_mcr_wa(gt, wal, slice, subslice);
1351 * SQIDI ranges are special because they use different steering
1352 * registers than everything else we work with. On XeHP SDV and
1353 * DG2-G10, any value in the steering registers will work fine since
1354 * all instances are present, but DG2-G11 only has SQIDI instances at
1355 * ID's 2 and 3, so we need to steer to one of those. For simplicity
1356 * we'll just steer to a hardcoded "2" since that value will work
1359 __set_mcr_steering(wal, MCFG_MCR_SELECTOR, 0, 2);
1360 __set_mcr_steering(wal, SF_MCR_SELECTOR, 0, 2);
1363 * On DG2, GAM registers have a dedicated steering control register
1364 * and must always be programmed to a hardcoded groupid of "1."
1366 if (IS_DG2(gt->i915))
1367 __set_mcr_steering(wal, GAM_MCR_SELECTOR, 1, 0);
1371 pvc_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
1376 * Setup implicit steering for COMPUTE and DSS ranges to the first
1377 * non-fused-off DSS. All other types of MCR registers will be
1378 * explicitly steered.
1380 dss = intel_sseu_find_first_xehp_dss(>->info.sseu, 0, 0);
1381 __add_mcr_wa(gt, wal, dss / GEN_DSS_PER_CSLICE, dss % GEN_DSS_PER_CSLICE);
1385 icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1387 struct drm_i915_private *i915 = gt->i915;
1389 icl_wa_init_mcr(gt, wal);
1391 /* WaModifyGamTlbPartitioning:icl */
1392 wa_write_clr_set(wal,
1393 GEN11_GACB_PERF_CTRL,
1394 GEN11_HASH_CTRL_MASK,
1395 GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
1397 /* Wa_1405766107:icl
1398 * Formerly known as WaCL2SFHalfMaxAlloc
1402 GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
1403 GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
1406 * Formerly known as WaDisCtxReload
1409 GEN8_GAMW_ECO_DEV_RW_IA,
1410 GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
1412 /* Wa_1406463099:icl
1413 * Formerly known as WaGamTlbPendError
1417 GAMT_CHKN_DISABLE_L3_COH_PIPE);
1420 * Wa_1408615072:icl,ehl (vsunit)
1421 * Wa_1407596294:icl,ehl (hsunit)
1423 wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
1424 VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
1426 /* Wa_1407352427:icl,ehl */
1427 wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
1428 PSDUNIT_CLKGATE_DIS);
1430 /* Wa_1406680159:icl,ehl */
1431 wa_mcr_write_or(wal,
1432 GEN11_SUBSLICE_UNIT_LEVEL_CLKGATE,
1433 GWUNIT_CLKGATE_DIS);
1435 /* Wa_1607087056:icl,ehl,jsl */
1436 if (IS_ICELAKE(i915) ||
1437 ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
1438 IS_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)))
1440 GEN11_SLICE_UNIT_LEVEL_CLKGATE,
1441 L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
1444 * This is not a documented workaround, but rather an optimization
1445 * to reduce sampler power.
1447 wa_mcr_write_clr(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);
1451 * Though there are per-engine instances of these registers,
1452 * they retain their value through engine resets and should
1453 * only be provided on the GT workaround list rather than
1454 * the engine-specific workaround list.
1457 wa_14011060649(struct intel_gt *gt, struct i915_wa_list *wal)
1459 struct intel_engine_cs *engine;
1462 for_each_engine(engine, gt, id) {
1463 if (engine->class != VIDEO_DECODE_CLASS ||
1464 (engine->instance % 2))
1467 wa_write_or(wal, VDBOX_CGCTL3F10(engine->mmio_base),
1468 IECPUNIT_CLKGATE_DIS);
1473 gen12_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1475 icl_wa_init_mcr(gt, wal);
1477 /* Wa_14011060649:tgl,rkl,dg1,adl-s,adl-p */
1478 wa_14011060649(gt, wal);
1480 /* Wa_14011059788:tgl,rkl,adl-s,dg1,adl-p */
1481 wa_mcr_write_or(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);
1486 * Firmware on some gen12 platforms locks the MISCCPCTL register,
1487 * preventing i915 from modifying it for this workaround. Skip the
1488 * readback verification for this workaround on debug builds; if the
1489 * workaround doesn't stick due to firmware behavior, it's not an error
1490 * that we want CI to flag.
1492 wa_add(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE,
1497 dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1499 gen12_gt_workarounds_init(gt, wal);
1501 /* Wa_1409420604:dg1 */
1502 wa_mcr_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE2,
1503 CPSSUNIT_CLKGATE_DIS);
1505 /* Wa_1408615072:dg1 */
1506 /* Empirical testing shows this register is unaffected by engine reset. */
1507 wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, VSUNIT_CLKGATE_DIS_TGL);
1511 xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1513 struct drm_i915_private *i915 = gt->i915;
1515 xehp_init_mcr(gt, wal);
1517 /* Wa_1409757795:xehpsdv */
1518 wa_mcr_write_or(wal, SCCGCTL94DC, CG3DDISURB);
1520 /* Wa_18011725039:xehpsdv */
1521 if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_B0)) {
1522 wa_mcr_masked_dis(wal, MLTICTXCTL, TDONRENDER);
1523 wa_mcr_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH);
1526 /* Wa_16011155590:xehpsdv */
1527 if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
1528 wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
1529 TSGUNIT_CLKGATE_DIS);
1531 /* Wa_14011780169:xehpsdv */
1532 if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_B0, STEP_FOREVER)) {
1533 wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS |
1534 GAMTLBVDBOX7_CLKGATE_DIS |
1535 GAMTLBVDBOX6_CLKGATE_DIS |
1536 GAMTLBVDBOX5_CLKGATE_DIS |
1537 GAMTLBVDBOX4_CLKGATE_DIS |
1538 GAMTLBVDBOX3_CLKGATE_DIS |
1539 GAMTLBVDBOX2_CLKGATE_DIS |
1540 GAMTLBVDBOX1_CLKGATE_DIS |
1541 GAMTLBVDBOX0_CLKGATE_DIS |
1542 GAMTLBKCR_CLKGATE_DIS |
1543 GAMTLBGUC_CLKGATE_DIS |
1544 GAMTLBBLT_CLKGATE_DIS);
1545 wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS |
1546 GAMTLBGFXA1_CLKGATE_DIS |
1547 GAMTLBCOMPA0_CLKGATE_DIS |
1548 GAMTLBCOMPA1_CLKGATE_DIS |
1549 GAMTLBCOMPB0_CLKGATE_DIS |
1550 GAMTLBCOMPB1_CLKGATE_DIS |
1551 GAMTLBCOMPC0_CLKGATE_DIS |
1552 GAMTLBCOMPC1_CLKGATE_DIS |
1553 GAMTLBCOMPD0_CLKGATE_DIS |
1554 GAMTLBCOMPD1_CLKGATE_DIS |
1555 GAMTLBMERT_CLKGATE_DIS |
1556 GAMTLBVEBOX3_CLKGATE_DIS |
1557 GAMTLBVEBOX2_CLKGATE_DIS |
1558 GAMTLBVEBOX1_CLKGATE_DIS |
1559 GAMTLBVEBOX0_CLKGATE_DIS);
1562 /* Wa_16012725990:xehpsdv */
1563 if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_FOREVER))
1564 wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, VFUNIT_CLKGATE_DIS);
1566 /* Wa_14011060649:xehpsdv */
1567 wa_14011060649(gt, wal);
1569 /* Wa_14012362059:xehpsdv */
1570 wa_mcr_write_or(wal, XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB);
1572 /* Wa_14014368820:xehpsdv */
1573 wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
1574 INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);
1576 /* Wa_14010670810:xehpsdv */
1577 wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
1581 dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1583 xehp_init_mcr(gt, wal);
1585 /* Wa_14011060649:dg2 */
1586 wa_14011060649(gt, wal);
1588 if (IS_DG2_G10(gt->i915)) {
1589 /* Wa_22010523718:dg2 */
1590 wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
1591 CG3DDISCFEG_CLKGATE_DIS);
1593 /* Wa_14011006942:dg2 */
1594 wa_mcr_write_or(wal, GEN11_SUBSLICE_UNIT_LEVEL_CLKGATE,
1595 DSS_ROUTER_CLKGATE_DIS);
1598 /* Wa_14014830051:dg2 */
1599 wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);
1603 * Skip verification for possibly locked register.
1605 wa_add(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE,
1608 /* Wa_18018781329 */
1609 wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
1610 wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
1611 wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
1612 wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
1614 /* Wa_1509235366:dg2 */
1615 wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
1616 INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);
1618 /* Wa_14010648519:dg2 */
1619 wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
1623 pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1625 pvc_init_mcr(gt, wal);
1627 /* Wa_14015795083 */
1628 wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
1630 /* Wa_18018781329 */
1631 wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
1632 wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
1633 wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
1634 wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
1636 /* Wa_16016694945 */
1637 wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC);
1641 xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1643 /* Wa_14018778641 / Wa_18018781329 */
1644 wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
1646 /* Wa_22016670082 */
1647 wa_write_or(wal, GEN12_SQCNT1, GEN12_STRICT_RAR_ENABLE);
1649 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
1650 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
1651 /* Wa_14014830051 */
1652 wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);
1654 /* Wa_14015795083 */
1655 wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
1659 * Unlike older platforms, we no longer set up implicit steering here;
1660 * all MCR accesses are explicitly steered.
1662 debug_dump_steering(gt);
1666 wa_16021867713(struct intel_gt *gt, struct i915_wa_list *wal)
1668 struct intel_engine_cs *engine;
1671 for_each_engine(engine, gt, id)
1672 if (engine->class == VIDEO_DECODE_CLASS)
1673 wa_write_or(wal, VDBOX_CGCTL3F1C(engine->mmio_base),
1674 MFXPIPE_CLKGATE_DIS);
1678 xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1680 wa_16021867713(gt, wal);
1686 * Note that although these registers are MCR on the primary
1687 * GT, the media GT's versions are regular singleton registers.
1689 wa_write_or(wal, XELPMP_GSC_MOD_CTRL, FORCE_MISS_FTLB);
1691 /* Wa_22016670082 */
1692 wa_write_or(wal, GEN12_SQCNT1, GEN12_STRICT_RAR_ENABLE);
1694 debug_dump_steering(gt);
1698 * The bspec performance guide has recommended MMIO tuning settings. These
1699 * aren't truly "workarounds" but we want to program them through the
1700 * workaround infrastructure to make sure they're (re)applied at the proper
1703 * The programming in this function is for settings that persist through
1704 * engine resets and also are not part of any engine's register state context.
1705 * I.e., settings that only need to be re-applied in the event of a full GT
1708 static void gt_tuning_settings(struct intel_gt *gt, struct i915_wa_list *wal)
1710 if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) {
1711 wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
1712 wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
1715 if (IS_PONTEVECCHIO(gt->i915)) {
1716 wa_mcr_write(wal, XEHPC_L3SCRUB,
1717 SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
1718 wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_HOSTCACHEEN);
1721 if (IS_DG2(gt->i915)) {
1722 wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
1723 wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
1728 gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
1730 struct drm_i915_private *i915 = gt->i915;
1732 gt_tuning_settings(gt, wal);
1734 if (gt->type == GT_MEDIA) {
1735 if (MEDIA_VER_FULL(i915) == IP_VER(13, 0))
1736 xelpmp_gt_workarounds_init(gt, wal);
1738 MISSING_CASE(MEDIA_VER_FULL(i915));
1743 if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)))
1744 xelpg_gt_workarounds_init(gt, wal);
1745 else if (IS_PONTEVECCHIO(i915))
1746 pvc_gt_workarounds_init(gt, wal);
1747 else if (IS_DG2(i915))
1748 dg2_gt_workarounds_init(gt, wal);
1749 else if (IS_XEHPSDV(i915))
1750 xehpsdv_gt_workarounds_init(gt, wal);
1751 else if (IS_DG1(i915))
1752 dg1_gt_workarounds_init(gt, wal);
1753 else if (GRAPHICS_VER(i915) == 12)
1754 gen12_gt_workarounds_init(gt, wal);
1755 else if (GRAPHICS_VER(i915) == 11)
1756 icl_gt_workarounds_init(gt, wal);
1757 else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
1758 cfl_gt_workarounds_init(gt, wal);
1759 else if (IS_GEMINILAKE(i915))
1760 glk_gt_workarounds_init(gt, wal);
1761 else if (IS_KABYLAKE(i915))
1762 kbl_gt_workarounds_init(gt, wal);
1763 else if (IS_BROXTON(i915))
1764 gen9_gt_workarounds_init(gt, wal);
1765 else if (IS_SKYLAKE(i915))
1766 skl_gt_workarounds_init(gt, wal);
1767 else if (IS_HASWELL(i915))
1768 hsw_gt_workarounds_init(gt, wal);
1769 else if (IS_VALLEYVIEW(i915))
1770 vlv_gt_workarounds_init(gt, wal);
1771 else if (IS_IVYBRIDGE(i915))
1772 ivb_gt_workarounds_init(gt, wal);
1773 else if (GRAPHICS_VER(i915) == 6)
1774 snb_gt_workarounds_init(gt, wal);
1775 else if (GRAPHICS_VER(i915) == 5)
1776 ilk_gt_workarounds_init(gt, wal);
1777 else if (IS_G4X(i915))
1778 g4x_gt_workarounds_init(gt, wal);
1779 else if (GRAPHICS_VER(i915) == 4)
1780 gen4_gt_workarounds_init(gt, wal);
1781 else if (GRAPHICS_VER(i915) <= 8)
1784 MISSING_CASE(GRAPHICS_VER(i915));
1787 void intel_gt_init_workarounds(struct intel_gt *gt)
1789 struct i915_wa_list *wal = >->wa_list;
1791 wa_init_start(wal, gt, "GT", "global");
1792 gt_init_workarounds(gt, wal);
1793 wa_init_finish(wal);
1797 wa_verify(struct intel_gt *gt, const struct i915_wa *wa, u32 cur,
1798 const char *name, const char *from)
1800 if ((cur ^ wa->set) & wa->read) {
1802 "%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n",
1803 name, from, i915_mmio_reg_offset(wa->reg),
1804 cur, cur & wa->read, wa->set & wa->read);
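/*
 * Worked example (illustrative): with wa->set == 0x8008 and wa->read ==
 * 0x000f, a readback of cur == 0x0007 gives (cur ^ wa->set) & wa->read ==
 * 0x000f, i.e. a relevant bit differs, so the workaround is reported lost.
 */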
1812 static void wa_list_apply(const struct i915_wa_list *wal)
1814 struct intel_gt *gt = wal->gt;
1815 struct intel_uncore *uncore = gt->uncore;
1816 enum forcewake_domains fw;
1817 unsigned long flags;
1824 fw = wal_get_fw_for_rmw(uncore, wal);
1826 intel_gt_mcr_lock(gt, &flags);
1827 spin_lock(&uncore->lock);
1828 intel_uncore_forcewake_get__locked(uncore, fw);
1830 for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
1833 /* open-coded rmw due to steering */
1836 intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
1837 intel_uncore_read_fw(uncore, wa->reg);
1838 val = (old & ~wa->clr) | wa->set;
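/*
 * Presumably always write when there is no clear mask (wa->clr == 0):
 * such entries are masked/write-only style updates whose readback cannot
 * be trusted to tell us whether the write is still needed.
 */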
1839 if (val != old || !wa->clr) {
1841 intel_gt_mcr_multicast_write_fw(gt, wa->mcr_reg, val);
1843 intel_uncore_write_fw(uncore, wa->reg, val);
1846 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
1847 u32 val = wa->is_mcr ?
1848 intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
1849 intel_uncore_read_fw(uncore, wa->reg);
1851 wa_verify(gt, wa, val, wal->name, "application");
1855 intel_uncore_forcewake_put__locked(uncore, fw);
1856 spin_unlock(&uncore->lock);
1857 intel_gt_mcr_unlock(gt, flags);
1860 void intel_gt_apply_workarounds(struct intel_gt *gt)
1862 wa_list_apply(>->wa_list);
1865 static bool wa_list_verify(struct intel_gt *gt,
1866 const struct i915_wa_list *wal,
1869 struct intel_uncore *uncore = gt->uncore;
1871 enum forcewake_domains fw;
1872 unsigned long flags;
1876 fw = wal_get_fw_for_rmw(uncore, wal);
1878 intel_gt_mcr_lock(gt, &flags);
1879 spin_lock(&uncore->lock);
1880 intel_uncore_forcewake_get__locked(uncore, fw);
1882 for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
1883 ok &= wa_verify(wal->gt, wa, wa->is_mcr ?
1884 intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
1885 intel_uncore_read_fw(uncore, wa->reg),
1888 intel_uncore_forcewake_put__locked(uncore, fw);
1889 spin_unlock(&uncore->lock);
1890 intel_gt_mcr_unlock(gt, flags);
1895 bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
1897 return wa_list_verify(gt, >->wa_list, from);
1901 static bool is_nonpriv_flags_valid(u32 flags)
1903 /* Check only valid flag bits are set */
1904 if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)
1907 /* NB: Only 3 out of 4 enum values are valid for access field */
1908 if ((flags & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
1909 RING_FORCE_TO_NONPRIV_ACCESS_INVALID)
1916 whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
1918 struct i915_wa wa = {
1922 if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
1925 if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
1928 wa.reg.reg |= flags;
1933 whitelist_mcr_reg_ext(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 flags)
1935 struct i915_wa wa = {
1940 if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
1943 if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
1946 wa.mcr_reg.reg |= flags;
1951 whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
1953 whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
1957 whitelist_mcr_reg(struct i915_wa_list *wal, i915_mcr_reg_t reg)
1959 whitelist_mcr_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
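/*
 * Note: the register offsets (with the access flags ORed into their low
 * bits) collected here are later written to the engine's
 * RING_FORCE_TO_NONPRIV slots, which is what actually opens the registers
 * up to userspace.
 */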
1962 static void gen9_whitelist_build(struct i915_wa_list *w)
1964 /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
1965 whitelist_reg(w, GEN9_CTX_PREEMPT_REG);
1967 /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
1968 whitelist_reg(w, GEN8_CS_CHICKEN1);
1970 /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
1971 whitelist_reg(w, GEN8_HDC_CHICKEN1);
1973 /* WaSendPushConstantsFromMMIO:skl,bxt */
1974 whitelist_reg(w, COMMON_SLICE_CHICKEN2);
1977 static void skl_whitelist_build(struct intel_engine_cs *engine)
1979 struct i915_wa_list *w = &engine->whitelist;
1981 if (engine->class != RENDER_CLASS)
1984 gen9_whitelist_build(w);
1986 /* WaDisableLSQCROPERFforOCL:skl */
1987 whitelist_mcr_reg(w, GEN8_L3SQCREG4);
1990 static void bxt_whitelist_build(struct intel_engine_cs *engine)
1992 if (engine->class != RENDER_CLASS)
1995 gen9_whitelist_build(&engine->whitelist);
1998 static void kbl_whitelist_build(struct intel_engine_cs *engine)
2000 struct i915_wa_list *w = &engine->whitelist;
2002 if (engine->class != RENDER_CLASS)
2005 gen9_whitelist_build(w);
2007 /* WaDisableLSQCROPERFforOCL:kbl */
2008 whitelist_mcr_reg(w, GEN8_L3SQCREG4);
2011 static void glk_whitelist_build(struct intel_engine_cs *engine)
2013 struct i915_wa_list *w = &engine->whitelist;
2015 if (engine->class != RENDER_CLASS)
2018 gen9_whitelist_build(w);
2020 /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
2021 whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
2024 static void cfl_whitelist_build(struct intel_engine_cs *engine)
2026 struct i915_wa_list *w = &engine->whitelist;
2028 if (engine->class != RENDER_CLASS)
2031 gen9_whitelist_build(w);
2034 * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
2036 * This covers 4 register which are next to one another :
2037 * - PS_INVOCATION_COUNT
2038 * - PS_INVOCATION_COUNT_UDW
2040 * - PS_DEPTH_COUNT_UDW
2042 whitelist_reg_ext(w, PS_INVOCATION_COUNT,
2043 RING_FORCE_TO_NONPRIV_ACCESS_RD |
2044 RING_FORCE_TO_NONPRIV_RANGE_4);
2047 static void allow_read_ctx_timestamp(struct intel_engine_cs *engine)
2049 struct i915_wa_list *w = &engine->whitelist;
2051 if (engine->class != RENDER_CLASS)
2052 whitelist_reg_ext(w,
2053 RING_CTX_TIMESTAMP(engine->mmio_base),
2054 RING_FORCE_TO_NONPRIV_ACCESS_RD);
2057 static void cml_whitelist_build(struct intel_engine_cs *engine)
2059 allow_read_ctx_timestamp(engine);
2061 cfl_whitelist_build(engine);
2064 static void icl_whitelist_build(struct intel_engine_cs *engine)
2066 struct i915_wa_list *w = &engine->whitelist;
2068 allow_read_ctx_timestamp(engine);
2070 switch (engine->class) {
2072 /* WaAllowUMDToModifyHalfSliceChicken7:icl */
2073 whitelist_mcr_reg(w, GEN9_HALF_SLICE_CHICKEN7);
2075 /* WaAllowUMDToModifySamplerMode:icl */
2076 whitelist_mcr_reg(w, GEN10_SAMPLER_MODE);
2078 /* WaEnableStateCacheRedirectToCS:icl */
2079 whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
2082 * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
2084 * This covers 4 register which are next to one another :
2085 * - PS_INVOCATION_COUNT
2086 * - PS_INVOCATION_COUNT_UDW
2088 * - PS_DEPTH_COUNT_UDW
2090 whitelist_reg_ext(w, PS_INVOCATION_COUNT,
2091 RING_FORCE_TO_NONPRIV_ACCESS_RD |
2092 RING_FORCE_TO_NONPRIV_RANGE_4);
2095 case VIDEO_DECODE_CLASS:
2096 /* hucStatusRegOffset */
2097 whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
2098 RING_FORCE_TO_NONPRIV_ACCESS_RD);
2099 /* hucUKernelHdrInfoRegOffset */
2100 whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
2101 RING_FORCE_TO_NONPRIV_ACCESS_RD);
2102 /* hucStatus2RegOffset */
2103 whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
2104 RING_FORCE_TO_NONPRIV_ACCESS_RD);
2112 static void tgl_whitelist_build(struct intel_engine_cs *engine)
2114 struct i915_wa_list *w = &engine->whitelist;
2116 allow_read_ctx_timestamp(engine);
2118 switch (engine->class) {
2121 * WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl
2124 * This covers 4 registers which are next to one another :
2125 * - PS_INVOCATION_COUNT
2126 * - PS_INVOCATION_COUNT_UDW
2128 * - PS_DEPTH_COUNT_UDW
2130 whitelist_reg_ext(w, PS_INVOCATION_COUNT,
2131 RING_FORCE_TO_NONPRIV_ACCESS_RD |
2132 RING_FORCE_TO_NONPRIV_RANGE_4);
2136 * Wa_14012131227:dg1
2137 * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p
2139 whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
2141 /* Wa_1806527549:tgl */
2142 whitelist_reg(w, HIZ_CHICKEN);
2144 /* Required by recommended tuning setting (not a workaround) */
2145 whitelist_reg(w, GEN11_COMMON_SLICE_CHICKEN3);
2153 static void dg2_whitelist_build(struct intel_engine_cs *engine)
2155 struct i915_wa_list *w = &engine->whitelist;
2157 switch (engine->class) {
2159 /* Required by recommended tuning setting (not a workaround) */
2160 whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
2168 static void blacklist_trtt(struct intel_engine_cs *engine)
2170 struct i915_wa_list *w = &engine->whitelist;
2173 * Prevent read/write access to [0x4400, 0x4600) which covers
2174 * the TRTT range across all engines. Note that normally userspace
2175 * cannot access the other engines' trtt control, but for simplicity
2176 * we cover the entire range on each engine.
2178 whitelist_reg_ext(w, _MMIO(0x4400),
2179 RING_FORCE_TO_NONPRIV_DENY |
2180 RING_FORCE_TO_NONPRIV_RANGE_64);
2181 whitelist_reg_ext(w, _MMIO(0x4500),
2182 RING_FORCE_TO_NONPRIV_DENY |
2183 RING_FORCE_TO_NONPRIV_RANGE_64);
2186 static void pvc_whitelist_build(struct intel_engine_cs *engine)
2188 /* Wa_16014440446:pvc */
2189 blacklist_trtt(engine);
2192 static void xelpg_whitelist_build(struct intel_engine_cs *engine)
2194 struct i915_wa_list *w = &engine->whitelist;
2196 switch (engine->class) {
2198 /* Required by recommended tuning setting (not a workaround) */
2199 whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
2207 void intel_engine_init_whitelist(struct intel_engine_cs *engine)
2209 struct drm_i915_private *i915 = engine->i915;
2210 struct i915_wa_list *w = &engine->whitelist;
2212 wa_init_start(w, engine->gt, "whitelist", engine->name);
2214 if (engine->gt->type == GT_MEDIA)
2216 else if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71)))
2217 xelpg_whitelist_build(engine);
2218 else if (IS_PONTEVECCHIO(i915))
2219 pvc_whitelist_build(engine);
2220 else if (IS_DG2(i915))
2221 dg2_whitelist_build(engine);
2222 else if (IS_XEHPSDV(i915))
2224 else if (GRAPHICS_VER(i915) == 12)
2225 tgl_whitelist_build(engine);
2226 else if (GRAPHICS_VER(i915) == 11)
2227 icl_whitelist_build(engine);
2228 else if (IS_COMETLAKE(i915))
2229 cml_whitelist_build(engine);
2230 else if (IS_COFFEELAKE(i915))
2231 cfl_whitelist_build(engine);
2232 else if (IS_GEMINILAKE(i915))
2233 glk_whitelist_build(engine);
2234 else if (IS_KABYLAKE(i915))
2235 kbl_whitelist_build(engine);
2236 else if (IS_BROXTON(i915))
2237 bxt_whitelist_build(engine);
2238 else if (IS_SKYLAKE(i915))
2239 skl_whitelist_build(engine);
2240 else if (GRAPHICS_VER(i915) <= 8)
2243 MISSING_CASE(GRAPHICS_VER(i915));
2248 void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
2250 const struct i915_wa_list *wal = &engine->whitelist;
2251 struct intel_uncore *uncore = engine->uncore;
2252 const u32 base = engine->mmio_base;
2259 for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
2260 intel_uncore_write(uncore,
2261 RING_FORCE_TO_NONPRIV(base, i),
2262 i915_mmio_reg_offset(wa->reg));
2264 /* And clear the rest just in case of garbage */
2265 for (; i < RING_MAX_NONPRIV_SLOTS; i++)
2266 intel_uncore_write(uncore,
2267 RING_FORCE_TO_NONPRIV(base, i),
2268 i915_mmio_reg_offset(RING_NOPID(base)));
2272 * engine_fake_wa_init(), a place holder to program the registers
2273 * which are not part of an official workaround defined by the
2275 * Adding programming of those register inside workaround will
2276 * allow utilizing wa framework to proper application and verification.
2279 engine_fake_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2284 * RING_CMD_CCTL specifies the default MOCS entry that will be used
2285 * by the command streamer when executing commands that don't have
2286 * a way to explicitly specify a MOCS setting. The default should
2287 * usually reference whichever MOCS entry corresponds to uncached
2288 * behavior, although use of a WB cached entry is recommended by the
2289 * spec in certain circumstances on specific platforms.
2291 if (GRAPHICS_VER(engine->i915) >= 12) {
2292 mocs_r = engine->gt->mocs.uc_index;
2293 mocs_w = engine->gt->mocs.uc_index;
2295 if (HAS_L3_CCS_READ(engine->i915) &&
2296 engine->class == COMPUTE_CLASS) {
2297 mocs_r = engine->gt->mocs.wb_index;
2300 * Even on the few platforms where MOCS 0 is a
2301 * legitimate table entry, it's never the correct
2302 * setting to use here; we can assume the MOCS init
2303 * just forgot to initialize wb_index.
2305 drm_WARN_ON(&engine->i915->drm, mocs_r == 0);
2308 wa_masked_field_set(wal,
2309 RING_CMD_CCTL(engine->mmio_base),
2311 CMD_CCTL_MOCS_OVERRIDE(mocs_w, mocs_r));
2316 rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2318 struct drm_i915_private *i915 = engine->i915;
2319 struct intel_gt *gt = engine->gt;
2321 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2322 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
2323 /* Wa_22014600077 */
2324 wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
2325 ENABLE_EU_COUNT_FOR_TDL_FLUSH);
2328 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2329 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
2332 wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
2333 SC_DISABLE_POWER_OPTIMIZATION_EBB);
2336 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2338 /* Wa_22012856258 */
2339 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
2340 GEN12_DISABLE_READ_SUPPRESSION);
2345 * Wa_22010960976:dg2
2346 * Wa_14013347512:dg2
2348 wa_mcr_masked_dis(wal, XEHP_HDC_CHICKEN0,
2349 LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK);
2352 if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)) ||
2354 /* Wa_14015150844 */
2355 wa_mcr_add(wal, XEHP_HDC_CHICKEN0, 0,
2356 _MASKED_BIT_ENABLE(DIS_ATOMIC_CHAINING_TYPED_WRITES),
2360 if (IS_DG2(i915) || IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
2361 IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
2363 * Wa_1606700617:tgl,dg1,adl-p
2364 * Wa_22010271021:tgl,rkl,dg1,adl-s,adl-p
2365 * Wa_14010826681:tgl,dg1,rkl,adl-p
2366 * Wa_18019627453:dg2
2369 GEN9_CS_DEBUG_MODE1,
2370 FF_DOP_CLOCK_GATE_DISABLE);
2373 if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || IS_DG1(i915) ||
2374 IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
2375 /* Wa_1606931601:tgl,rkl,dg1,adl-s,adl-p */
2376 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);
2379 * Wa_1407928979:tgl A*
2380 * Wa_18011464164:tgl[B0+],dg1[B0+]
2381 * Wa_22010931296:tgl[B0+],dg1[B0+]
2382 * Wa_14010919138:rkl,dg1,adl-s,adl-p
2384 wa_write_or(wal, GEN7_FF_THREAD_MODE,
2385 GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
2387 /* Wa_1406941453:tgl,rkl,dg1,adl-s,adl-p */
2388 wa_mcr_masked_en(wal,
2393 if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
2394 IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
2396 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
2397 GEN12_PUSH_CONST_DEREF_HOLD_DIS);
2399 /* Wa_14010229206 */
2400 wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
2403 if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) {
2407 * On TGL and RKL there are multiple entries for this WA in the
2408 * BSpec; some indicate this is an A0-only WA, others indicate
2409 * it applies to all steppings so we trust the "all steppings."
2412 RING_PSMI_CTL(RENDER_RING_BASE),
2413 GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
2414 GEN8_RC_SEMA_IDLE_MSG_DISABLE);
2417 if (GRAPHICS_VER(i915) == 11) {
2418 /* This is not an Wa. Enable for better image quality */
2421 _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
2425 * Formerly known as WaGAPZPriorityScheme
2429 GEN11_ARBITRATION_PRIO_ORDER_MASK);
2433 * Formerly known as WaL3BankAddressHashing
2435 wa_write_clr_set(wal,
2437 GEN11_HASH_CTRL_EXCL_MASK,
2438 GEN11_HASH_CTRL_EXCL_BIT0);
2439 wa_write_clr_set(wal,
2441 GEN11_BANK_HASH_ADDR_EXCL_MASK,
2442 GEN11_BANK_HASH_ADDR_EXCL_BIT0);
2446 * Formerly known as WaDisableCleanEvicts
2448 wa_mcr_write_or(wal,
2450 GEN11_LQSC_CLEAN_EVICT_DISABLE);
2452 /* Wa_1606682166:icl */
2455 GEN7_DISABLE_SAMPLER_PREFETCH);
2457 /* Wa_1409178092:icl */
2458 wa_mcr_write_clr_set(wal,
2460 GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
2463 /* WaEnable32PlaneMode:icl */
2464 wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
2465 GEN11_ENABLE_32_PLANE_MODE);
2468 * Wa_1408767742:icl[a2..forever],ehl[all]
2469 * Wa_1605460711:icl[a0..c0]
2472 GEN7_FF_THREAD_MODE,
2473 GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
2475 /* Wa_22010271021 */
2477 GEN9_CS_DEBUG_MODE1,
2478 FF_DOP_CLOCK_GATE_DISABLE);
2482 * Intel platforms that support fine-grained preemption (i.e., gen9 and
2483 * beyond) allow the kernel-mode driver to choose between two different
2484 * options for controlling preemption granularity and behavior.
2486 * Option 1 (hardware default):
2487 * Preemption settings are controlled in a global manner via
2488 * kernel-only register CS_DEBUG_MODE1 (0x20EC). Any granularity
2489 * and settings chosen by the kernel-mode driver will apply to all
2490 * userspace clients.
2493 * Preemption settings are controlled on a per-context basis via
2494 * register CS_CHICKEN1 (0x2580). CS_CHICKEN1 is saved/restored on
2495 * context switch and is writable by userspace (e.g., via
2496 * MI_LOAD_REGISTER_IMMEDIATE instructions placed in a batch buffer)
2497 * which allows different userspace drivers/clients to select
2498 * different settings, or to change those settings on the fly in
2499 * response to runtime needs. This option was known by name
2500 * "FtrPerCtxtPreemptionGranularityControl" at one time, although
2501 * that name is somewhat misleading as other non-granularity
2502 * preemption settings are also impacted by this decision.
2504 * On Linux, our policy has always been to let userspace drivers
2505 * control preemption granularity/settings (Option 2). This was
2506 * originally mandatory on gen9 to prevent ABI breakage (old gen9
2507 * userspace developed before object-level preemption was enabled would
2508 * not behave well if i915 were to go with Option 1 and enable that
2509 * preemption in a global manner). On gen9 each context would have
2510 * object-level preemption disabled by default (see
2511 * WaDisable3DMidCmdPreemption in gen9_ctx_workarounds_init), but
2512 * userspace drivers could opt-in to object-level preemption as they
2513 * saw fit. For post-gen9 platforms, we continue to utilize Option 2;
2514 * even though it is no longer necessary for ABI compatibility when
2515 * enabling a new platform, it does ensure that userspace will be able
2516 * to implement any workarounds that show up requiring temporary
2517 * adjustments to preemption behavior at runtime.
2519 * Notes/Workarounds:
2520 * - Wa_14015141709: On DG2 and early steppings of MTL,
2521 * CS_CHICKEN1[0] does not disable object-level preemption as
2522 * it is supposed to (nor does CS_DEBUG_MODE1[0] if we had been
2523 * using Option 1). Effectively this means userspace is unable
2524 * to disable object-level preemption on these platforms/steppings
2525 * despite the setting here.
2527 * - Wa_16013994831: May require that userspace program
2528 * CS_CHICKEN1[10] when certain runtime conditions are true.
2529 * Userspace requires Option 2 to be in effect for their update of
2530 * CS_CHICKEN1[10] to be effective.
2532 * Other workarounds may appear in the future that will also require
2533 * Option 2 behavior to allow proper userspace implementation.
2535 if (GRAPHICS_VER(i915) >= 9)
2537 GEN7_FF_SLICE_CS_CHICKEN1,
2538 GEN9_FFSC_PERCTX_PREEMPT_CTRL);
2540 if (IS_SKYLAKE(i915) ||
2541 IS_KABYLAKE(i915) ||
2542 IS_COFFEELAKE(i915) ||
2543 IS_COMETLAKE(i915)) {
2544 /* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
2547 GEN9_GAPS_TSV_CREDIT_DISABLE);
2550 if (IS_BROXTON(i915)) {
2551 /* WaDisablePooledEuLoadBalancingFix:bxt */
2553 FF_SLICE_CS_CHICKEN2,
2554 GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
2557 if (GRAPHICS_VER(i915) == 9) {
2558 /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
2560 GEN9_CSFE_CHICKEN1_RCS,
2561 GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);
2563 /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
2564 wa_mcr_write_or(wal,
2566 GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
2568 /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
2569 if (IS_GEN9_LP(i915))
2570 wa_mcr_write_clr_set(wal,
2572 L3_PRIO_CREDITS_MASK,
2573 L3_GENERAL_PRIO_CREDITS(62) |
2574 L3_HIGH_PRIO_CREDITS(2));
2576 /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
2577 wa_mcr_write_or(wal,
2579 GEN8_LQSC_FLUSH_COHERENT_LINES);
2581 /* Disable atomics in L3 to prevent unrecoverable hangs */
2582 wa_write_clr_set(wal, GEN9_SCRATCH_LNCF1,
2583 GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE, 0);
2584 wa_mcr_write_clr_set(wal, GEN8_L3SQCREG4,
2585 GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE, 0);
2586 wa_mcr_write_clr_set(wal, GEN9_SCRATCH1,
2587 EVICTION_PERF_FIX_ENABLE, 0);
2590 if (IS_HASWELL(i915)) {
2591 /* WaSampleCChickenBitEnable:hsw */
2593 HSW_HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE);
2597 /* enable HiZ Raw Stall Optimization */
2598 HIZ_RAW_STALL_OPT_DISABLE);
2601 if (IS_VALLEYVIEW(i915)) {
2602 /* WaDisableEarlyCull:vlv */
2605 _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
2608 * WaVSThreadDispatchOverride:ivb,vlv
2610 * This actually overrides the dispatch
2611 * mode for all thread types.
2613 wa_write_clr_set(wal,
2614 GEN7_FF_THREAD_MODE,
2616 GEN7_FF_TS_SCHED_HW |
2617 GEN7_FF_VS_SCHED_HW |
2618 GEN7_FF_DS_SCHED_HW);
2620 /* WaPsdDispatchEnable:vlv */
2621 /* WaDisablePSDDualDispatchEnable:vlv */
2623 GEN7_HALF_SLICE_CHICKEN1,
2624 GEN7_MAX_PS_THREAD_DEP |
2625 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
2628 if (IS_IVYBRIDGE(i915)) {
2629 /* WaDisableEarlyCull:ivb */
2632 _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
2634 if (0) { /* causes HiZ corruption on ivb:gt1 */
2635 /* enable HiZ Raw Stall Optimization */
2638 HIZ_RAW_STALL_OPT_DISABLE);
2642 * WaVSThreadDispatchOverride:ivb,vlv
2644 * This actually overrides the dispatch
2645 * mode for all thread types.
2647 wa_write_clr_set(wal,
2648 GEN7_FF_THREAD_MODE,
2650 GEN7_FF_TS_SCHED_HW |
2651 GEN7_FF_VS_SCHED_HW |
2652 GEN7_FF_DS_SCHED_HW);
2654 /* WaDisablePSDDualDispatchEnable:ivb */
2655 if (IS_IVB_GT1(i915))
2657 GEN7_HALF_SLICE_CHICKEN1,
2658 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
2661 if (GRAPHICS_VER(i915) == 7) {
2662 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
2664 RING_MODE_GEN7(RENDER_RING_BASE),
2665 GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE);
2667 /* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */
2668 wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
2671 * BSpec says this must be set, even though
2672 * WaDisable4x2SubspanOptimization:ivb,hsw
2673 * WaDisable4x2SubspanOptimization isn't listed for VLV.
2677 PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
2680 * BSpec recommends 8x4 when MSAA is used,
2681 * however in practice 16x4 seems fastest.
2683 * Note that PS/WM thread counts depend on the WIZ hashing
2684 * disable bit, which we don't touch here, but it's good
2685 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
2687 wa_masked_field_set(wal,
2689 GEN6_WIZ_HASHING_MASK,
2690 GEN6_WIZ_HASHING_16x4);
2693 if (IS_GRAPHICS_VER(i915, 6, 7))
2695 * We need to disable the AsyncFlip performance optimisations in
2696 * order to use MI_WAIT_FOR_EVENT within the CS. It should
2697 * already be programmed to '1' on all products.
2699 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
2702 RING_MI_MODE(RENDER_RING_BASE),
2703 ASYNC_FLIP_PERF_DISABLE);
2705 if (GRAPHICS_VER(i915) == 6) {
2707 * Required for the hardware to program scanline values for
2709 * WaEnableFlushTlbInvalidationMode:snb
2713 GFX_TLB_INVALIDATE_EXPLICIT);
2715 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
2718 _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB);
2722 /* WaStripsFansDisableFastClipPerformanceFix:snb */
2723 _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL |
2726 * "This bit must be set if 3DSTATE_CLIP clip mode is set
2727 * to normal and 3DSTATE_SF number of SF output attributes
2730 _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH);
2733 * BSpec recommends 8x4 when MSAA is used,
2734 * however in practice 16x4 seems fastest.
2736 * Note that PS/WM thread counts depend on the WIZ hashing
2737 * disable bit, which we don't touch here, but it's good
2738 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
2740 wa_masked_field_set(wal,
2742 GEN6_WIZ_HASHING_MASK,
2743 GEN6_WIZ_HASHING_16x4);
2745 /* WaDisable_RenderCache_OperationalFlush:snb */
2746 wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
2749 * From the Sandybridge PRM, volume 1 part 3, page 24:
2750 * "If this bit is set, STCunit will have LRA as replacement
2751 * policy. [...] This bit must be reset. LRA replacement
2752 * policy is not supported."
2756 CM0_STC_EVICT_DISABLE_LRA_SNB);
2759 if (IS_GRAPHICS_VER(i915, 4, 6))
2760 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
2761 wa_add(wal, RING_MI_MODE(RENDER_RING_BASE),
2762 0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
2763 /* XXX bit doesn't stick on Broadwater */
2764 IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH, true);
2766 if (GRAPHICS_VER(i915) == 4)
2768 * Disable CONSTANT_BUFFER before it is loaded from the context
2769 * image. For as it is loaded, it is executed and the stored
2770 * address may no longer be valid, leading to a GPU hang.
2772 * This imposes the requirement that userspace reload their
2773 * CONSTANT_BUFFER on every batch, fortunately a requirement
2774 * they are already accustomed to from before contexts were
2777 wa_add(wal, ECOSKPD(RENDER_RING_BASE),
2778 0, _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE),
2779 0 /* XXX bit doesn't stick on Broadwater */,
2784 xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2786 struct drm_i915_private *i915 = engine->i915;
2788 /* WaKBLVECSSemaphoreWaitPoll:kbl */
2789 if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, STEP_A0, STEP_F0)) {
2791 RING_SEMA_WAIT_POLL(engine->mmio_base),
2794 /* Wa_16018031267, Wa_16018063123 */
2795 if (NEEDS_FASTCOLOR_BLT_WABB(engine))
2796 wa_masked_field_set(wal, ECOSKPD(engine->mmio_base),
2797 XEHP_BLITTER_SCHEDULING_MODE_MASK,
2798 XEHP_BLITTER_ROUND_ROBIN_MODE);
2802 ccs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2804 if (IS_PVC_CT_STEP(engine->i915, STEP_A0, STEP_C0)) {
2805 /* Wa_14014999345:pvc */
2806 wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS, DISABLE_ECC);
2811 * The bspec performance guide has recommended MMIO tuning settings. These
2812 * aren't truly "workarounds" but we want to program them with the same
2813 * workaround infrastructure to ensure that they're automatically added to
2814 * the GuC save/restore lists, re-applied at the right times, and checked for
2815 * any conflicting programming requested by real workarounds.
2817 * Programming settings should be added here only if their registers are not
2818 * part of an engine's register state context. If a register is part of a
2819 * context, then any tuning settings should be programmed in an appropriate
2820 * function invoked by __intel_engine_init_ctx_wa().
2823 add_render_compute_tuning_settings(struct intel_gt *gt,
2824 struct i915_wa_list *wal)
2826 struct drm_i915_private *i915 = gt->i915;
2828 if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)) || IS_DG2(i915))
2829 wa_mcr_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512);
2832 * This tuning setting proves beneficial only on ATS-M designs; the
2833 * default "age based" setting is optimal on regular DG2 and other
2836 if (INTEL_INFO(i915)->tuning_thread_rr_after_dep)
2837 wa_mcr_masked_field_set(wal, GEN9_ROW_CHICKEN4, THREAD_EX_ARB_MODE,
2838 THREAD_EX_ARB_MODE_RR_AFTER_DEP);
2840 if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
2841 wa_write_clr(wal, GEN8_GARBCNTL, GEN12_BUS_HASH_CTL_BIT_EXC);
2845 * The workarounds in this function apply to shared registers in
2846 * the general render reset domain that aren't tied to a
2847 * specific engine. Since all render+compute engines get reset
2848 * together, and the contents of these registers are lost during
2849 * the shared render domain reset, we'll define such workarounds
2850 * here and then add them to just a single RCS or CCS engine's
2851 * workaround list (whichever engine has the XXXX flag).
2854 general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2856 struct drm_i915_private *i915 = engine->i915;
2857 struct intel_gt *gt = engine->gt;
2859 add_render_compute_tuning_settings(gt, wal);
2861 if (GRAPHICS_VER(i915) >= 11) {
2862 /* This is not a Wa (although referred to as
2863 * WaSetInidrectStateOverride in places), this allows
2864 * applications that reference sampler states through
2865 * the BindlessSamplerStateBaseAddress to have their
2866 * border color relative to DynamicStateBaseAddress
2867 * rather than BindlessSamplerStateBaseAddress.
2869 * Otherwise SAMPLER_STATE border colors have to be
2870 * copied in multiple heaps (DynamicStateBaseAddress &
2871 * BindlessSamplerStateBaseAddress)
2875 wa_mcr_masked_en(wal,
2877 GEN11_INDIRECT_STATE_BASE_ADDR_OVERRIDE);
2880 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) ||
2881 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER))
2882 /* Wa_14017856879 */
2883 wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN3, MTL_DISABLE_FIX_FOR_EOT_FLUSH);
2885 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2886 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))
2891 wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
2892 MTL_DISABLE_SAMPLER_SC_OOO);
2894 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))
2895 /* Wa_22015279794 */
2896 wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
2897 DISABLE_PREFETCH_INTO_IC);
2899 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2900 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
2902 /* Wa_22013037850 */
2903 wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW,
2904 DISABLE_128B_EVICTION_COMMAND_UDW);
2906 /* Wa_18017747507 */
2907 wa_masked_en(wal, VFG_PREEMPTION_CHICKEN, POLYGON_TRIFAN_LINELOOP_DISABLE);
2910 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2911 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
2912 IS_PONTEVECCHIO(i915) ||
2914 /* Wa_22014226127 */
2915 wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE);
2918 if (IS_PONTEVECCHIO(i915) || IS_DG2(i915)) {
2919 /* Wa_14015227452:dg2,pvc */
2920 wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE);
2922 /* Wa_16015675438:dg2,pvc */
2923 wa_masked_en(wal, FF_SLICE_CS_CHICKEN2, GEN12_PERF_FIX_BALANCING_CFE_DISABLE);
2928 * Wa_16011620976:dg2_g11
2929 * Wa_22015475538:dg2
2931 wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
2933 /* Wa_18028616096 */
2934 wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, UGM_FRAGMENT_THRESHOLD_TO_3);
2937 if (IS_DG2_G11(i915)) {
2939 * Wa_22012826095:dg2
2940 * Wa_22013059131:dg2
2942 wa_mcr_write_clr_set(wal, LSC_CHICKEN_BIT_0_UDW,
2944 REG_FIELD_PREP(MAXREQS_PER_BANK, 2));
2946 /* Wa_22013059131:dg2 */
2947 wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0,
2948 FORCE_1_SUB_MESSAGE_PER_FRAGMENT);
2953 * Note that register 0xE420 is write-only and cannot be read
2954 * back for verification on DG2 (due to Wa_14012342262), so
2955 * we need to explicitly skip the readback.
2957 wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
2958 _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
2959 0 /* write-only, so skip validation */,
2963 if (IS_XEHPSDV(i915)) {
2965 wa_mcr_masked_en(wal,
2967 SYSTOLIC_DOP_CLOCK_GATING_DIS);
2970 wa_mcr_masked_en(wal,
2972 GEN12_DISABLE_GRF_CLEAR);
2974 /* Wa_14010449647:xehpsdv */
2975 wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
2976 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
2981 engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2983 if (GRAPHICS_VER(engine->i915) < 4)
2986 engine_fake_wa_init(engine, wal);
2989 * These are common workarounds that just need to applied
2990 * to a single RCS/CCS engine's workaround list since
2991 * they're reset as part of the general render domain reset.
2993 if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
2994 general_render_compute_wa_init(engine, wal);
2996 if (engine->class == COMPUTE_CLASS)
2997 ccs_engine_wa_init(engine, wal);
2998 else if (engine->class == RENDER_CLASS)
2999 rcs_engine_wa_init(engine, wal);
3001 xcs_engine_wa_init(engine, wal);
3004 void intel_engine_init_workarounds(struct intel_engine_cs *engine)
3006 struct i915_wa_list *wal = &engine->wa_list;
3008 wa_init_start(wal, engine->gt, "engine", engine->name);
3009 engine_init_workarounds(engine, wal);
3010 wa_init_finish(wal);
3013 void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
3015 wa_list_apply(&engine->wa_list);
3018 static const struct i915_range mcr_ranges_gen8[] = {
3019 { .start = 0x5500, .end = 0x55ff },
3020 { .start = 0x7000, .end = 0x7fff },
3021 { .start = 0x9400, .end = 0x97ff },
3022 { .start = 0xb000, .end = 0xb3ff },
3023 { .start = 0xe000, .end = 0xe7ff },
3027 static const struct i915_range mcr_ranges_gen12[] = {
3028 { .start = 0x8150, .end = 0x815f },
3029 { .start = 0x9520, .end = 0x955f },
3030 { .start = 0xb100, .end = 0xb3ff },
3031 { .start = 0xde80, .end = 0xe8ff },
3032 { .start = 0x24a00, .end = 0x24a7f },
3036 static const struct i915_range mcr_ranges_xehp[] = {
3037 { .start = 0x4000, .end = 0x4aff },
3038 { .start = 0x5200, .end = 0x52ff },
3039 { .start = 0x5400, .end = 0x7fff },
3040 { .start = 0x8140, .end = 0x815f },
3041 { .start = 0x8c80, .end = 0x8dff },
3042 { .start = 0x94d0, .end = 0x955f },
3043 { .start = 0x9680, .end = 0x96ff },
3044 { .start = 0xb000, .end = 0xb3ff },
3045 { .start = 0xc800, .end = 0xcfff },
3046 { .start = 0xd800, .end = 0xd8ff },
3047 { .start = 0xdc00, .end = 0xffff },
3048 { .start = 0x17000, .end = 0x17fff },
3049 { .start = 0x24a00, .end = 0x24a7f },
3053 static bool mcr_range(struct drm_i915_private *i915, u32 offset)
3055 const struct i915_range *mcr_ranges;
3058 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
3059 mcr_ranges = mcr_ranges_xehp;
3060 else if (GRAPHICS_VER(i915) >= 12)
3061 mcr_ranges = mcr_ranges_gen12;
3062 else if (GRAPHICS_VER(i915) >= 8)
3063 mcr_ranges = mcr_ranges_gen8;
3068 * Registers in these ranges are affected by the MCR selector
3069 * which only controls CPU initiated MMIO. Routing does not
3070 * work for CS access so we cannot verify them on this path.
3072 for (i = 0; mcr_ranges[i].start; i++)
3073 if (offset >= mcr_ranges[i].start &&
3074 offset <= mcr_ranges[i].end)
3081 wa_list_srm(struct i915_request *rq,
3082 const struct i915_wa_list *wal,
3083 struct i915_vma *vma)
3085 struct drm_i915_private *i915 = rq->i915;
3086 unsigned int i, count = 0;
3087 const struct i915_wa *wa;
3090 srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
3091 if (GRAPHICS_VER(i915) >= 8)
3094 for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
3095 if (!mcr_range(i915, i915_mmio_reg_offset(wa->reg)))
3099 cs = intel_ring_begin(rq, 4 * count);
3103 for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
3104 u32 offset = i915_mmio_reg_offset(wa->reg);
3106 if (mcr_range(i915, offset))
3111 *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
3114 intel_ring_advance(rq, cs);
3119 static int engine_wa_list_verify(struct intel_context *ce,
3120 const struct i915_wa_list * const wal,
3123 const struct i915_wa *wa;
3124 struct i915_request *rq;
3125 struct i915_vma *vma;
3126 struct i915_gem_ww_ctx ww;
3134 vma = __vm_create_scratch_for_read(&ce->engine->gt->ggtt->vm,
3135 wal->count * sizeof(u32));
3137 return PTR_ERR(vma);
3139 intel_engine_pm_get(ce->engine);
3140 i915_gem_ww_ctx_init(&ww, false);
3142 err = i915_gem_object_lock(vma->obj, &ww);
3144 err = intel_context_pin_ww(ce, &ww);
3148 err = i915_vma_pin_ww(vma, &ww, 0, 0,
3149 i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
3153 rq = i915_request_create(ce);
3159 err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
3161 err = wa_list_srm(rq, wal, vma);
3163 i915_request_get(rq);
3165 i915_request_set_error_once(rq, err);
3166 i915_request_add(rq);
3171 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
3176 results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
3177 if (IS_ERR(results)) {
3178 err = PTR_ERR(results);
3183 for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
3184 if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg)))
3187 if (!wa_verify(wal->gt, wa, results[i], wal->name, from))
3191 i915_gem_object_unpin_map(vma->obj);
3194 i915_request_put(rq);
3196 i915_vma_unpin(vma);
3198 intel_context_unpin(ce);
3200 if (err == -EDEADLK) {
3201 err = i915_gem_ww_ctx_backoff(&ww);
3205 i915_gem_ww_ctx_fini(&ww);
3206 intel_engine_pm_put(ce->engine);
3211 int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
3214 return engine_wa_list_verify(engine->kernel_context,
3219 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
3220 #include "selftest_workarounds.c"