1 // SPDX-License-Identifier: MIT
3 * Copyright © 2014-2018 Intel Corporation
8 #include "intel_context.h"
9 #include "intel_engine_pm.h"
10 #include "intel_engine_regs.h"
11 #include "intel_gpu_commands.h"
13 #include "intel_gt_mcr.h"
14 #include "intel_gt_print.h"
15 #include "intel_gt_regs.h"
16 #include "intel_ring.h"
17 #include "intel_workarounds.h"
20 * DOC: Hardware workarounds
22 * Hardware workarounds are register programming, documented to be executed in
23 * the driver, that falls outside of the normal programming sequences for a
24 * platform. There are some basic categories of workarounds, depending on
25 * how/when they are applied:
27 * - Context workarounds: workarounds that touch registers that are
28 * saved/restored to/from the HW context image. The list is emitted (via Load
29 * Register Immediate commands) once when initializing the device and saved in
30 * the default context. That default context is then used on every context
31 * creation to have a "primed golden context", i.e. a context image that
32 * already contains the changes needed to all the registers.
34 * Context workarounds should be implemented in the \*_ctx_workarounds_init()
35 * variants respective to the targeted platforms.
37 * - Engine workarounds: the list of these WAs is applied whenever the specific
38 * engine is reset. It's also possible that a set of engine classes share a
39 * common power domain and they are reset together. This happens on some
40 * platforms with render and compute engines. In this case (at least) one of
41 * them needs to keep the workaround programming: the approach taken in the
42 * driver is to tie those workarounds to the first compute/render engine that
43 * is registered. When executing with GuC submission, engine resets are
44 * outside of kernel driver control, hence the list of registers involved is
45 * written once, on engine initialization, and then passed to GuC, which
46 * saves/restores their values before/after the reset takes place. See
47 * ``drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c`` for reference.
49 * Workarounds for registers specific to RCS and CCS should be implemented in
50 * rcs_engine_wa_init() and ccs_engine_wa_init(), respectively; those for
51 * registers belonging to BCS, VCS or VECS should be implemented in
52 * xcs_engine_wa_init(). Workarounds for registers not belonging to a specific
53 * engine's MMIO range but that are part of the common RCS/CCS reset domain
54 * should be implemented in general_render_compute_wa_init().
56 * - GT workarounds: the list of these WAs is applied whenever these registers
57 * revert to their default values: on GPU reset, suspend/resume [1]_, etc.
59 * GT workarounds should be implemented in the \*_gt_workarounds_init()
60 * variants respective to the targeted platforms.
62 * - Register whitelist: some workarounds need to be implemented in userspace,
63 * but need to touch privileged registers. The whitelist in the kernel
64 * instructs the hardware to allow the access to happen. From the kernel side,
65 * this is just a special case of an MMIO workaround (as we write the list
66 * of these to-be-whitelisted registers to some special HW registers).
68 * Register whitelisting should be done in the \*_whitelist_build() variants
69 * respective to the targeted platforms.
71 * - Workaround batchbuffers: buffers that get executed automatically by the
72 * hardware on every HW context restore. These buffers are created and
73 * programmed in the default context so the hardware always goes through those
74 * programming sequences when switching contexts. The support for workaround
75 * batchbuffers is enabled by these hardware mechanisms:
77 * #. INDIRECT_CTX: A batchbuffer and an offset are provided in the default
78 * context, pointing the hardware to jump to that location when that offset
79 * is reached in the context restore. The workaround batchbuffer in the
80 * driver currently uses this mechanism for all platforms.
82 * #. BB_PER_CTX_PTR: A batchbuffer is provided in the default context,
83 * pointing the hardware to a buffer to continue executing after the
84 * engine registers are restored in a context restore sequence. This is
85 * currently not used in the driver.
87 * - Other: There are WAs that, due to their nature, cannot be applied from a
88 * central place. Those are peppered around the rest of the code, as needed.
89 * Workarounds related to the display IP are the main example.
91 * .. [1] Technically, some registers are power-context saved & restored, so they
92 * survive a suspend/resume. In practice, writing them again is not too
93 * costly and simplifies things, so it's the approach taken in the driver.
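/*
 * A minimal sketch of the list lifecycle described above (the
 * xyz_ctx_workarounds_init() hook name is hypothetical; the real
 * dispatch lives in __intel_engine_init_ctx_wa() and friends below):
 *
 *	wa_init_start(wal, gt, "context", engine->name);
 *	xyz_ctx_workarounds_init(engine, wal);
 *	wa_init_finish(wal);
 *
 * The finished list is then either applied directly via MMIO (GT and
 * engine lists) or emitted as Load Register Immediate commands
 * (context lists).
 */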
96 static void wa_init_start(struct i915_wa_list *wal, struct intel_gt *gt,
97 const char *name, const char *engine_name)
101 wal->engine_name = engine_name;
104 #define WA_LIST_CHUNK (1 << 4)
106 static void wa_init_finish(struct i915_wa_list *wal)
108 /* Trim unused entries. */
109 if (!IS_ALIGNED(wal->count, WA_LIST_CHUNK)) {
110 struct i915_wa *list = kmemdup(wal->list,
111 wal->count * sizeof(*list),
123 gt_dbg(wal->gt, "Initialized %u %s workarounds on %s\n",
124 wal->wa_count, wal->name, wal->engine_name);
127 static enum forcewake_domains
128 wal_get_fw_for_rmw(struct intel_uncore *uncore, const struct i915_wa_list *wal)
130 enum forcewake_domains fw = 0;
134 for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
135 fw |= intel_uncore_forcewake_for_reg(uncore,
143 static void _wa_add(struct i915_wa_list *wal, const struct i915_wa *wa)
145 unsigned int addr = i915_mmio_reg_offset(wa->reg);
146 struct drm_i915_private *i915 = wal->gt->i915;
147 unsigned int start = 0, end = wal->count;
148 const unsigned int grow = WA_LIST_CHUNK;
151 GEM_BUG_ON(!is_power_of_2(grow));
153 if (IS_ALIGNED(wal->count, grow)) { /* Either uninitialized or full. */
154 struct i915_wa *list;
156 list = kmalloc_array(ALIGN(wal->count + 1, grow), sizeof(*wa),
159 drm_err(&i915->drm, "No space for workaround init!\n");
164 memcpy(list, wal->list, sizeof(*wa) * wal->count);
171 while (start < end) {
172 unsigned int mid = start + (end - start) / 2;
174 if (i915_mmio_reg_offset(wal->list[mid].reg) < addr) {
176 } else if (i915_mmio_reg_offset(wal->list[mid].reg) > addr) {
179 wa_ = &wal->list[mid];
181 if ((wa->clr | wa_->clr) && !(wa->clr & ~wa_->clr)) {
183 "Discarding overwritten w/a for reg %04x (clear: %08x, set: %08x)\n",
184 i915_mmio_reg_offset(wa_->reg),
187 wa_->set &= ~wa->clr;
193 wa_->read |= wa->read;
199 wa_ = &wal->list[wal->count++];
202 while (wa_-- > wal->list) {
203 GEM_BUG_ON(i915_mmio_reg_offset(wa_[0].reg) ==
204 i915_mmio_reg_offset(wa_[1].reg));
205 if (i915_mmio_reg_offset(wa_[1].reg) >
206 i915_mmio_reg_offset(wa_[0].reg))
209 swap(wa_[1], wa_[0]);
213 static void wa_add(struct i915_wa_list *wal, i915_reg_t reg,
214 u32 clear, u32 set, u32 read_mask, bool masked_reg)
216 struct i915_wa wa = {
221 .masked_reg = masked_reg,
227 static void wa_mcr_add(struct i915_wa_list *wal, i915_mcr_reg_t reg,
228 u32 clear, u32 set, u32 read_mask, bool masked_reg)
230 struct i915_wa wa = {
235 .masked_reg = masked_reg,
243 wa_write_clr_set(struct i915_wa_list *wal, i915_reg_t reg, u32 clear, u32 set)
245 wa_add(wal, reg, clear, set, clear | set, false);
249 wa_mcr_write_clr_set(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clear, u32 set)
251 wa_mcr_add(wal, reg, clear, set, clear | set, false);
255 wa_write(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
257 wa_write_clr_set(wal, reg, ~0, set);
261 wa_mcr_write(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 set)
263 wa_mcr_write_clr_set(wal, reg, ~0, set);
267 wa_write_or(struct i915_wa_list *wal, i915_reg_t reg, u32 set)
269 wa_write_clr_set(wal, reg, set, set);
273 wa_mcr_write_or(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 set)
275 wa_mcr_write_clr_set(wal, reg, set, set);
279 wa_write_clr(struct i915_wa_list *wal, i915_reg_t reg, u32 clr)
281 wa_write_clr_set(wal, reg, clr, 0);
285 wa_mcr_write_clr(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 clr)
287 wa_mcr_write_clr_set(wal, reg, clr, 0);
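/*
 * A quick reference for the helpers above, in terms of the value the
 * apply path later computes as (old & ~clr) | set:
 *
 *	wa_write(wal, reg, v)     -> v         (clr == ~0)
 *	wa_write_or(wal, reg, b)  -> old | b   (clr == set == b)
 *	wa_write_clr(wal, reg, b) -> old & ~b  (set == 0)
 */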
291 * WA operations on "masked register". A masked register has the upper 16 bits
292 * documented as "masked" in b-spec. Its purpose is to allow writing to just a
293 * portion of the register without an RMW: you simply write in the upper 16 bits
294 * the mask of bits you are going to modify.
296 * The wa_masked_* family of functions already does the necessary operations to
297 * calculate the mask based on the parameters passed, so the user only has to
298 * provide the lower 16 bits of that register.
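/*
 * For example, wa_masked_en(wal, reg, BIT(3)) records a write of
 * _MASKED_BIT_ENABLE(BIT(3)) == (BIT(3) << 16) | BIT(3) == 0x00080008:
 * the upper half selects bit 3 for modification, the lower half sets it.
 */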
302 wa_masked_en(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
304 wa_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
308 wa_mcr_masked_en(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
310 wa_mcr_add(wal, reg, 0, _MASKED_BIT_ENABLE(val), val, true);
314 wa_masked_dis(struct i915_wa_list *wal, i915_reg_t reg, u32 val)
316 wa_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
320 wa_mcr_masked_dis(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 val)
322 wa_mcr_add(wal, reg, 0, _MASKED_BIT_DISABLE(val), val, true);
326 wa_masked_field_set(struct i915_wa_list *wal, i915_reg_t reg,
329 wa_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
333 wa_mcr_masked_field_set(struct i915_wa_list *wal, i915_mcr_reg_t reg,
336 wa_mcr_add(wal, reg, 0, _MASKED_FIELD(mask, val), mask, true);
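/*
 * E.g. wa_masked_field_set(wal, reg, 0xf0, 0x30) records
 * _MASKED_FIELD(0xf0, 0x30) == 0x00f00030, replacing the whole 0xf0
 * field with 0x30 in a single masked write.
 */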
339 static void gen6_ctx_workarounds_init(struct intel_engine_cs *engine,
340 struct i915_wa_list *wal)
342 wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
345 static void gen7_ctx_workarounds_init(struct intel_engine_cs *engine,
346 struct i915_wa_list *wal)
348 wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
351 static void gen8_ctx_workarounds_init(struct intel_engine_cs *engine,
352 struct i915_wa_list *wal)
354 wa_masked_en(wal, INSTPM, INSTPM_FORCE_ORDERING);
356 /* WaDisableAsyncFlipPerfMode:bdw,chv */
357 wa_masked_en(wal, RING_MI_MODE(RENDER_RING_BASE), ASYNC_FLIP_PERF_DISABLE);
359 /* WaDisablePartialInstShootdown:bdw,chv */
360 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
361 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
363 /* Use Force Non-Coherent whenever executing a 3D context. This is a
364 * workaround for a possible hang in the unlikely event a TLB
365 * invalidation occurs during a PSD flush.
367 /* WaForceEnableNonCoherent:bdw,chv */
368 /* WaHdcDisableFetchWhenMasked:bdw,chv */
369 wa_masked_en(wal, HDC_CHICKEN0,
370 HDC_DONOT_FETCH_MEM_WHEN_MASKED |
371 HDC_FORCE_NON_COHERENT);
373 /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
374 * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
375 * polygons in the same 8x4 pixel/sample area to be processed without
376 * stalling waiting for the earlier ones to write to Hierarchical Z
379 * This optimization is off by default for BDW and CHV; turn it on.
381 wa_masked_dis(wal, CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
383 /* Wa4x4STCOptimizationDisable:bdw,chv */
384 wa_masked_en(wal, CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
387 * BSpec recommends 8x4 when MSAA is used,
388 * however in practice 16x4 seems fastest.
390 * Note that PS/WM thread counts depend on the WIZ hashing
391 * disable bit, which we don't touch here, but it's good
392 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
394 wa_masked_field_set(wal, GEN7_GT_MODE,
395 GEN6_WIZ_HASHING_MASK,
396 GEN6_WIZ_HASHING_16x4);
399 static void bdw_ctx_workarounds_init(struct intel_engine_cs *engine,
400 struct i915_wa_list *wal)
402 struct drm_i915_private *i915 = engine->i915;
404 gen8_ctx_workarounds_init(engine, wal);
406 /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
407 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
409 /* WaDisableDopClockGating:bdw
411 * Also see the related UCGTCL1 write in bdw_init_clock_gating()
412 * to disable EUTC clock gating.
414 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
415 DOP_CLOCK_GATING_DISABLE);
417 wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN3,
418 GEN8_SAMPLER_POWER_BYPASS_DIS);
420 wa_masked_en(wal, HDC_CHICKEN0,
421 /* WaForceContextSaveRestoreNonCoherent:bdw */
422 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
423 /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
424 (IS_BROADWELL_GT3(i915) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
427 static void chv_ctx_workarounds_init(struct intel_engine_cs *engine,
428 struct i915_wa_list *wal)
430 gen8_ctx_workarounds_init(engine, wal);
432 /* WaDisableThreadStallDopClockGating:chv */
433 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
435 /* Improve HiZ throughput on CHV. */
436 wa_masked_en(wal, HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
439 static void gen9_ctx_workarounds_init(struct intel_engine_cs *engine,
440 struct i915_wa_list *wal)
442 struct drm_i915_private *i915 = engine->i915;
445 /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
447 * Must match Display Engine. See
448 * WaCompressedResourceDisplayNewHashMode.
450 wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
451 GEN9_PBE_COMPRESSED_HASH_SELECTION);
452 wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
453 GEN9_SAMPLER_HASH_COMPRESSED_READ_ADDR);
456 /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl,glk,cfl */
457 /* WaDisablePartialInstShootdown:skl,bxt,kbl,glk,cfl */
458 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
459 FLOW_CONTROL_ENABLE |
460 PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
462 /* WaEnableYV12BugFixInHalfSliceChicken7:skl,bxt,kbl,glk,cfl */
463 /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl,cfl */
464 wa_mcr_masked_en(wal, GEN9_HALF_SLICE_CHICKEN7,
465 GEN9_ENABLE_YV12_BUGFIX |
466 GEN9_ENABLE_GPGPU_PREEMPTION);
468 /* Wa4x4STCOptimizationDisable:skl,bxt,kbl,glk,cfl */
469 /* WaDisablePartialResolveInVc:skl,bxt,kbl,cfl */
470 wa_masked_en(wal, CACHE_MODE_1,
471 GEN8_4x4_STC_OPTIMIZATION_DISABLE |
472 GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE);
474 /* WaCcsTlbPrefetchDisable:skl,bxt,kbl,glk,cfl */
475 wa_mcr_masked_dis(wal, GEN9_HALF_SLICE_CHICKEN5,
476 GEN9_CCS_TLB_PREFETCH_ENABLE);
478 /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl,cfl */
479 wa_masked_en(wal, HDC_CHICKEN0,
480 HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
481 HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
483 /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
484 * both tied to WaForceContextSaveRestoreNonCoherent
485 * in some hsds for skl. We keep the tie for all gen9. The
486 * documentation is a bit hazy and so we want to get common behaviour,
487 * even though there is no clear evidence we would need both on kbl/bxt.
488 * This area has been a source of system hangs so we play it safe
489 * and mimic the skl regardless of what bspec says.
491 * Use Force Non-Coherent whenever executing a 3D context. This
492 * is a workaround for a possible hang in the unlikely event
493 * a TLB invalidation occurs during a PSD flush.
496 /* WaForceEnableNonCoherent:skl,bxt,kbl,cfl */
497 wa_masked_en(wal, HDC_CHICKEN0,
498 HDC_FORCE_NON_COHERENT);
500 /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl,cfl */
501 if (IS_SKYLAKE(i915) ||
503 IS_COFFEELAKE(i915) ||
505 wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN3,
506 GEN8_SAMPLER_POWER_BYPASS_DIS);
508 /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl,glk,cfl */
509 wa_mcr_masked_en(wal, HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
512 * Supporting preemption with fine-granularity requires changes in the
513 * batch buffer programming. Since we can't break old userspace, we
514 * need to set our default preemption level to a safe value. Userspace is
515 * still able to use more fine-grained preemption levels, since in
516 * WaEnablePreemptionGranularityControlByUMD we're whitelisting the
517 * per-ctx register. As such, WaDisable{3D,GPGPU}MidCmdPreemption are
518 * not real HW workarounds, but merely a way to start using preemption
519 * while maintaining old contract with userspace.
522 /* WaDisable3DMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
523 wa_masked_dis(wal, GEN8_CS_CHICKEN1, GEN9_PREEMPT_3D_OBJECT_LEVEL);
525 /* WaDisableGPGPUMidCmdPreemption:skl,bxt,glk,cfl,[cnl] */
526 wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
527 GEN9_PREEMPT_GPGPU_LEVEL_MASK,
528 GEN9_PREEMPT_GPGPU_COMMAND_LEVEL);
530 /* WaClearHIZ_WM_CHICKEN3:bxt,glk */
531 if (IS_GEN9_LP(i915))
532 wa_masked_en(wal, GEN9_WM_CHICKEN3, GEN9_FACTOR_IN_CLR_VAL_HIZ);
535 static void skl_tune_iz_hashing(struct intel_engine_cs *engine,
536 struct i915_wa_list *wal)
538 struct intel_gt *gt = engine->gt;
539 u8 vals[3] = { 0, 0, 0 };
542 for (i = 0; i < 3; i++) {
546 * Only consider slices where one, and only one, subslice has 7
547 * EUs.
549 if (!is_power_of_2(gt->info.sseu.subslice_7eu[i]))
553 * subslice_7eu[i] != 0 (because of the check above) and
554 * ss_max == 4 (maximum number of subslices possible per slice)
558 ss = ffs(gt->info.sseu.subslice_7eu[i]) - 1;
562 if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
565 /* Tune IZ hashing. See intel_device_info_runtime_init() */
566 wa_masked_field_set(wal, GEN7_GT_MODE,
567 GEN9_IZ_HASHING_MASK(2) |
568 GEN9_IZ_HASHING_MASK(1) |
569 GEN9_IZ_HASHING_MASK(0),
570 GEN9_IZ_HASHING(2, vals[2]) |
571 GEN9_IZ_HASHING(1, vals[1]) |
572 GEN9_IZ_HASHING(0, vals[0]));
575 static void skl_ctx_workarounds_init(struct intel_engine_cs *engine,
576 struct i915_wa_list *wal)
578 gen9_ctx_workarounds_init(engine, wal);
579 skl_tune_iz_hashing(engine, wal);
582 static void bxt_ctx_workarounds_init(struct intel_engine_cs *engine,
583 struct i915_wa_list *wal)
585 gen9_ctx_workarounds_init(engine, wal);
587 /* WaDisableThreadStallDopClockGating:bxt */
588 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN,
589 STALL_DOP_GATING_DISABLE);
591 /* WaToEnableHwFixForPushConstHWBug:bxt */
592 wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
593 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
596 static void kbl_ctx_workarounds_init(struct intel_engine_cs *engine,
597 struct i915_wa_list *wal)
599 struct drm_i915_private *i915 = engine->i915;
601 gen9_ctx_workarounds_init(engine, wal);
603 /* WaToEnableHwFixForPushConstHWBug:kbl */
604 if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, STEP_C0, STEP_FOREVER))
605 wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
606 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
608 /* WaDisableSbeCacheDispatchPortSharing:kbl */
609 wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
610 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
613 static void glk_ctx_workarounds_init(struct intel_engine_cs *engine,
614 struct i915_wa_list *wal)
616 gen9_ctx_workarounds_init(engine, wal);
618 /* WaToEnableHwFixForPushConstHWBug:glk */
619 wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
620 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
623 static void cfl_ctx_workarounds_init(struct intel_engine_cs *engine,
624 struct i915_wa_list *wal)
626 gen9_ctx_workarounds_init(engine, wal);
628 /* WaToEnableHwFixForPushConstHWBug:cfl */
629 wa_masked_en(wal, COMMON_SLICE_CHICKEN2,
630 GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
632 /* WaDisableSbeCacheDispatchPortSharing:cfl */
633 wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
634 GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
637 static void icl_ctx_workarounds_init(struct intel_engine_cs *engine,
638 struct i915_wa_list *wal)
640 /* Wa_1406697149 (WaDisableBankHangMode:icl) */
641 wa_write(wal, GEN8_L3CNTLREG, GEN8_ERRDETBCTRL);
643 /* WaForceEnableNonCoherent:icl
644 * This is not the same workaround as in early Gen9 platforms, where
645 * lacking this could cause system hangs, but coherency performance
646 * overhead is high and only a few compute workloads really need it
647 * (the register is whitelisted in hardware now, so UMDs can opt in
648 * for coherency if they have a good reason).
650 wa_mcr_masked_en(wal, ICL_HDC_MODE, HDC_FORCE_NON_COHERENT);
652 /* WaEnableFloatBlendOptimization:icl */
653 wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
654 _MASKED_BIT_ENABLE(FLOAT_BLEND_OPTIMIZATION_ENABLE),
655 0 /* write-only, so skip validation */,
658 /* WaDisableGPGPUMidThreadPreemption:icl */
659 wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
660 GEN9_PREEMPT_GPGPU_LEVEL_MASK,
661 GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
663 /* allow headerless messages for preemptible GPGPU context */
664 wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
665 GEN11_SAMPLER_ENABLE_HEADLESS_MSG);
667 /* Wa_1604278689:icl,ehl */
668 wa_write(wal, IVB_FBC_RT_BASE, 0xFFFFFFFF & ~ILK_FBC_RT_VALID);
669 wa_write_clr_set(wal, IVB_FBC_RT_BASE_UPPER,
673 /* Wa_1406306137:icl,ehl */
674 wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN11_DIS_PICK_2ND_EU);
678 * These settings aren't actually workarounds, but general tuning settings that
679 * need to be programmed on the DG2 platform.
681 static void dg2_ctx_gt_tuning_init(struct intel_engine_cs *engine,
682 struct i915_wa_list *wal)
684 wa_mcr_masked_en(wal, CHICKEN_RASTER_2, TBIMR_FAST_CLIP);
685 wa_mcr_write_clr_set(wal, XEHP_L3SQCREG5, L3_PWM_TIMER_INIT_VAL_MASK,
686 REG_FIELD_PREP(L3_PWM_TIMER_INIT_VAL_MASK, 0x7f));
687 wa_mcr_write_clr_set(wal, XEHP_FF_MODE2, FF_MODE2_TDS_TIMER_MASK,
688 FF_MODE2_TDS_TIMER_128);
691 static void gen12_ctx_workarounds_init(struct intel_engine_cs *engine,
692 struct i915_wa_list *wal)
694 struct drm_i915_private *i915 = engine->i915;
697 * Wa_1409142259:tgl,dg1,adl-p
698 * Wa_1409347922:tgl,dg1,adl-p
699 * Wa_1409252684:tgl,dg1,adl-p
700 * Wa_1409217633:tgl,dg1,adl-p
701 * Wa_1409207793:tgl,dg1,adl-p
702 * Wa_1409178076:tgl,dg1,adl-p
703 * Wa_1408979724:tgl,dg1,adl-p
704 * Wa_14010443199:tgl,rkl,dg1,adl-p
705 * Wa_14010698770:tgl,rkl,dg1,adl-s,adl-p
706 * Wa_1409342910:tgl,rkl,dg1,adl-s,adl-p
708 wa_masked_en(wal, GEN11_COMMON_SLICE_CHICKEN3,
709 GEN12_DISABLE_CPS_AWARE_COLOR_PIPE);
711 /* WaDisableGPGPUMidThreadPreemption:gen12 */
712 wa_masked_field_set(wal, GEN8_CS_CHICKEN1,
713 GEN9_PREEMPT_GPGPU_LEVEL_MASK,
714 GEN9_PREEMPT_GPGPU_THREAD_GROUP_LEVEL);
717 * Wa_16011163337 - GS_TIMER
719 * TDS_TIMER: Although some platforms refer to it as Wa_1604555607, we
720 * need to program it even on those that don't explicitly list that
721 * workaround.
723 * Note that the programming of GEN12_FF_MODE2 is further modified
724 * according to the FF_MODE2 guidance given by Wa_1608008084.
725 * Wa_1608008084 tells us the FF_MODE2 register will return the wrong
726 * value when read from the CPU.
728 * The default value for this register is zero for all fields.
729 * So instead of doing a RMW we should just write the desired values
730 * for TDS and GS timers. Note that since the readback can't be trusted,
731 * the clear mask is just set to ~0 to make sure other bits are not
732 * inadvertently set. For the same reason read verification is ignored.
737 FF_MODE2_TDS_TIMER_128 | FF_MODE2_GS_TIMER_224,
742 wa_masked_en(wal, HIZ_CHICKEN, HZ_DEPTH_TEST_LE_GE_OPT_DISABLE);
745 wa_masked_en(wal, COMMON_SLICE_CHICKEN4, DISABLE_TDC_LOAD_BALANCING_CALC);
749 static void dg1_ctx_workarounds_init(struct intel_engine_cs *engine,
750 struct i915_wa_list *wal)
752 gen12_ctx_workarounds_init(engine, wal);
755 wa_masked_dis(wal, GEN11_COMMON_SLICE_CHICKEN3,
756 DG1_FLOAT_POINT_BLEND_OPT_STRICT_MODE_EN);
759 wa_masked_en(wal, HIZ_CHICKEN,
760 DG1_HZ_READ_SUPPRESSION_OPTIMIZATION_DISABLE);
763 static void dg2_ctx_workarounds_init(struct intel_engine_cs *engine,
764 struct i915_wa_list *wal)
766 dg2_ctx_gt_tuning_init(engine, wal);
768 /* Wa_16013271637:dg2 */
769 wa_mcr_masked_en(wal, XEHP_SLICE_COMMON_ECO_CHICKEN1,
770 MSC_MSAA_REODER_BUF_BYPASS_DISABLE);
772 /* Wa_14014947963:dg2 */
773 wa_masked_field_set(wal, VF_PREEMPTION, PREEMPTION_VERTEX_COUNT, 0x4000);
775 /* Wa_18018764978:dg2 */
776 wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);
778 /* Wa_18019271663:dg2 */
779 wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
781 /* Wa_14019877138:dg2 */
782 wa_mcr_masked_en(wal, XEHP_PSS_CHICKEN, FD_END_COLLECT);
785 static void xelpg_ctx_gt_tuning_init(struct intel_engine_cs *engine,
786 struct i915_wa_list *wal)
788 struct intel_gt *gt = engine->gt;
790 dg2_ctx_gt_tuning_init(engine, wal);
792 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) ||
793 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER))
794 wa_add(wal, DRAW_WATERMARK, VERT_WM_VAL, 0x3FF, 0, false);
797 static void xelpg_ctx_workarounds_init(struct intel_engine_cs *engine,
798 struct i915_wa_list *wal)
800 struct intel_gt *gt = engine->gt;
802 xelpg_ctx_gt_tuning_init(engine, wal);
804 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
805 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
807 wa_masked_field_set(wal, VF_PREEMPTION,
808 PREEMPTION_VERTEX_COUNT, 0x4000);
811 wa_mcr_masked_en(wal, XEHP_SLICE_COMMON_ECO_CHICKEN1,
812 MSC_MSAA_REODER_BUF_BYPASS_DISABLE);
815 wa_mcr_masked_en(wal, VFLSKPD, VF_PREFETCH_TLB_DIS);
818 wa_mcr_masked_en(wal, XEHP_PSS_MODE2, SCOREBOARD_STALL_FLUSH_CONTROL);
822 wa_masked_en(wal, CACHE_MODE_1, MSAA_OPTIMIZATION_REDUC_DISABLE);
825 static void fakewa_disable_nestedbb_mode(struct intel_engine_cs *engine,
826 struct i915_wa_list *wal)
829 * This is a "fake" workaround defined by software to ensure we
830 * maintain reliable, backward-compatible behavior for userspace with
831 * regards to how nested MI_BATCH_BUFFER_START commands are handled.
833 * The per-context setting of MI_MODE[12] determines whether the bits
834 * of a nested MI_BATCH_BUFFER_START instruction should be interpreted
835 * in the traditional manner or whether they should instead use a new
836 * tgl+ meaning that breaks backward compatibility, but allows nesting
837 * into 3rd-level batchbuffers. When this new capability was first
838 * added in TGL, it remained off by default unless a context
839 * intentionally opted in to the new behavior. However Xe_HPG now
840 * flips this on by default and requires that we explicitly opt out if
841 * we don't want the new behavior.
843 * From a SW perspective, we want to maintain the backward-compatible
844 * behavior for userspace, so we'll apply a fake workaround to set it
845 * back to the legacy behavior on platforms where the hardware default
846 * is to break compatibility. At the moment there is no Linux
847 * userspace that utilizes third-level batchbuffers, so this will keep
848 * userspace from needing to make any changes; using the legacy
849 * meaning is the correct thing to do. If/when we have userspace
850 * consumers that want to utilize third-level batch nesting, we can
851 * provide a context parameter to allow them to opt-in.
853 wa_masked_dis(wal, RING_MI_MODE(engine->mmio_base), TGL_NESTED_BB_EN);
856 static void gen12_ctx_gt_mocs_init(struct intel_engine_cs *engine,
857 struct i915_wa_list *wal)
862 * Some blitter commands do not have a field for MOCS; those
863 * commands will use the MOCS index pointed to by BLIT_CCTL.
864 * BLIT_CCTL registers need to be programmed to un-cached.
866 if (engine->class == COPY_ENGINE_CLASS) {
867 mocs = engine->gt->mocs.uc_index;
868 wa_write_clr_set(wal,
869 BLIT_CCTL(engine->mmio_base),
871 BLIT_CCTL_MOCS(mocs, mocs));
876 * gen12_ctx_gt_fake_wa_init() doesn't program an official workaround
877 * defined by the hardware team; rather, it programs general context
878 * registers. Adding this register programming to the context workaround
879 * list allows us to use the wa framework for proper application and validation.
882 gen12_ctx_gt_fake_wa_init(struct intel_engine_cs *engine,
883 struct i915_wa_list *wal)
885 if (GRAPHICS_VER_FULL(engine->i915) >= IP_VER(12, 55))
886 fakewa_disable_nestedbb_mode(engine, wal);
888 gen12_ctx_gt_mocs_init(engine, wal);
892 __intel_engine_init_ctx_wa(struct intel_engine_cs *engine,
893 struct i915_wa_list *wal,
896 struct drm_i915_private *i915 = engine->i915;
898 wa_init_start(wal, engine->gt, name, engine->name);
900 /* Applies to all engines */
902 * Fake workarounds are not actual workarounds but
903 * programming of context registers using the workaround framework.
905 if (GRAPHICS_VER(i915) >= 12)
906 gen12_ctx_gt_fake_wa_init(engine, wal);
908 if (engine->class != RENDER_CLASS)
911 if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71)))
912 xelpg_ctx_workarounds_init(engine, wal);
913 else if (IS_PONTEVECCHIO(i915))
914 ; /* noop; none at this time */
915 else if (IS_DG2(i915))
916 dg2_ctx_workarounds_init(engine, wal);
917 else if (IS_XEHPSDV(i915))
918 ; /* noop; none at this time */
919 else if (IS_DG1(i915))
920 dg1_ctx_workarounds_init(engine, wal);
921 else if (GRAPHICS_VER(i915) == 12)
922 gen12_ctx_workarounds_init(engine, wal);
923 else if (GRAPHICS_VER(i915) == 11)
924 icl_ctx_workarounds_init(engine, wal);
925 else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
926 cfl_ctx_workarounds_init(engine, wal);
927 else if (IS_GEMINILAKE(i915))
928 glk_ctx_workarounds_init(engine, wal);
929 else if (IS_KABYLAKE(i915))
930 kbl_ctx_workarounds_init(engine, wal);
931 else if (IS_BROXTON(i915))
932 bxt_ctx_workarounds_init(engine, wal);
933 else if (IS_SKYLAKE(i915))
934 skl_ctx_workarounds_init(engine, wal);
935 else if (IS_CHERRYVIEW(i915))
936 chv_ctx_workarounds_init(engine, wal);
937 else if (IS_BROADWELL(i915))
938 bdw_ctx_workarounds_init(engine, wal);
939 else if (GRAPHICS_VER(i915) == 7)
940 gen7_ctx_workarounds_init(engine, wal);
941 else if (GRAPHICS_VER(i915) == 6)
942 gen6_ctx_workarounds_init(engine, wal);
943 else if (GRAPHICS_VER(i915) < 8)
946 MISSING_CASE(GRAPHICS_VER(i915));
952 void intel_engine_init_ctx_wa(struct intel_engine_cs *engine)
954 __intel_engine_init_ctx_wa(engine, &engine->ctx_wa_list, "context");
957 int intel_engine_emit_ctx_wa(struct i915_request *rq)
959 struct i915_wa_list *wal = &rq->engine->ctx_wa_list;
960 struct intel_uncore *uncore = rq->engine->uncore;
961 enum forcewake_domains fw;
971 ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
975 cs = intel_ring_begin(rq, (wal->count * 2 + 2));
979 fw = wal_get_fw_for_rmw(uncore, wal);
981 intel_gt_mcr_lock(wal->gt, &flags);
982 spin_lock(&uncore->lock);
983 intel_uncore_forcewake_get__locked(uncore, fw);
985 *cs++ = MI_LOAD_REGISTER_IMM(wal->count);
986 for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
989 /* Skip reading the register if it's not really needed */
990 if (wa->masked_reg || (wa->clr | wa->set) == U32_MAX) {
994 intel_gt_mcr_read_any_fw(wal->gt, wa->mcr_reg) :
995 intel_uncore_read_fw(uncore, wa->reg);
1000 *cs++ = i915_mmio_reg_offset(wa->reg);
1005 intel_uncore_forcewake_put__locked(uncore, fw);
1006 spin_unlock(&uncore->lock);
1007 intel_gt_mcr_unlock(wal->gt, flags);
1009 intel_ring_advance(rq, cs);
1011 ret = rq->engine->emit_flush(rq, EMIT_BARRIER);
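/*
 * For reference, the ring contents emitted above for a two-entry list
 * look roughly like (a sketch, not captured output):
 *
 *	MI_LOAD_REGISTER_IMM(2)
 *	<reg0 offset> <reg0 value>
 *	<reg1 offset> <reg1 value>
 *	MI_NOOP (padding to an even dword count)
 *
 * hence the wal->count * 2 + 2 dwords reserved via intel_ring_begin().
 */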
1019 gen4_gt_workarounds_init(struct intel_gt *gt,
1020 struct i915_wa_list *wal)
1022 /* WaDisable_RenderCache_OperationalFlush:gen4,ilk */
1023 wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
1027 g4x_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1029 gen4_gt_workarounds_init(gt, wal);
1031 /* WaDisableRenderCachePipelinedFlush:g4x,ilk */
1032 wa_masked_en(wal, CACHE_MODE_0, CM0_PIPELINED_RENDER_FLUSH_DISABLE);
1036 ilk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1038 g4x_gt_workarounds_init(gt, wal);
1040 wa_masked_en(wal, _3D_CHICKEN2, _3D_CHICKEN2_WM_READ_PIPELINED);
1044 snb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1049 ivb_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1051 /* Apply the WaDisableRHWOOptimizationForRenderHang:ivb workaround. */
1053 GEN7_COMMON_SLICE_CHICKEN1,
1054 GEN7_CSC1_RHWO_OPT_DISABLE_IN_RCC);
1056 /* WaApplyL3ControlAndL3ChickenMode:ivb */
1057 wa_write(wal, GEN7_L3CNTLREG1, GEN7_WA_FOR_GEN7_L3_CONTROL);
1058 wa_write(wal, GEN7_L3_CHICKEN_MODE_REGISTER, GEN7_WA_L3_CHICKEN_MODE);
1060 /* WaForceL3Serialization:ivb */
1061 wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
1065 vlv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1067 /* WaForceL3Serialization:vlv */
1068 wa_write_clr(wal, GEN7_L3SQCREG4, L3SQ_URB_READ_CAM_MATCH_DISABLE);
1071 * WaIncreaseL3CreditsForVLVB0:vlv
1072 * This is the hardware default actually.
1074 wa_write(wal, GEN7_L3SQCREG1, VLV_B0_WA_L3SQCREG1_VALUE);
1078 hsw_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1080 /* L3 caching of data atomics doesn't work -- disable it. */
1081 wa_write(wal, HSW_SCRATCH1, HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE);
1084 HSW_ROW_CHICKEN3, 0,
1085 _MASKED_BIT_ENABLE(HSW_ROW_CHICKEN3_L3_GLOBAL_ATOMICS_DISABLE),
1086 0 /* XXX does this reg exist? */, true);
1088 /* WaVSRefCountFullforceMissDisable:hsw */
1089 wa_write_clr(wal, GEN7_FF_THREAD_MODE, GEN7_FF_VS_REF_CNT_FFME);
1093 gen9_wa_init_mcr(struct drm_i915_private *i915, struct i915_wa_list *wal)
1095 const struct sseu_dev_info *sseu = &to_gt(i915)->info.sseu;
1096 unsigned int slice, subslice;
1099 GEM_BUG_ON(GRAPHICS_VER(i915) != 9);
1102 * WaProgramMgsrForCorrectSliceSpecificMmioReads:gen9,glk,kbl,cml
1103 * Before any MMIO read into slice/subslice specific registers, MCR
1104 * packet control register needs to be programmed to point to any
1105 * enabled s/ss pair. Otherwise, incorrect values will be returned.
1106 * This means each subsequent MMIO read will be forwarded to a
1107 * specific s/ss combination, but this is OK since these registers
1108 * are consistent across s/ss in almost all cases. On the rare
1109 * occasions, such as INSTDONE, where this value is dependent
1110 * on s/ss combo, the read should be done with read_subslice_reg.
1112 slice = ffs(sseu->slice_mask) - 1;
1113 GEM_BUG_ON(slice >= ARRAY_SIZE(sseu->subslice_mask.hsw));
1114 subslice = ffs(intel_sseu_get_hsw_subslices(sseu, slice));
1115 GEM_BUG_ON(!subslice);
1119 * We use GEN8_MCR..() macros to calculate the |mcr| value for
1120 * Gen9 to address WaProgramMgsrForCorrectSliceSpecificMmioReads
1122 mcr = GEN8_MCR_SLICE(slice) | GEN8_MCR_SUBSLICE(subslice);
1123 mcr_mask = GEN8_MCR_SLICE_MASK | GEN8_MCR_SUBSLICE_MASK;
1125 drm_dbg(&i915->drm, "MCR slice:%d/subslice:%d = %x\n", slice, subslice, mcr);
1127 wa_write_clr_set(wal, GEN8_MCR_SELECTOR, mcr_mask, mcr);
1131 gen9_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1133 struct drm_i915_private *i915 = gt->i915;
1135 /* WaProgramMgsrForCorrectSliceSpecificMmioReads:glk,kbl,cml,gen9 */
1136 gen9_wa_init_mcr(i915, wal);
1138 /* WaDisableKillLogic:bxt,skl,kbl */
1139 if (!IS_COFFEELAKE(i915) && !IS_COMETLAKE(i915))
1144 if (HAS_LLC(i915)) {
1145 /* WaCompressedResourceSamplerPbeMediaNewHashMode:skl,kbl
1147 * Must match Display Engine. See
1148 * WaCompressedResourceDisplayNewHashMode.
1152 MMCD_PCLA | MMCD_HOTSPOT_EN);
1155 /* WaDisableHDCInvalidation:skl,bxt,kbl,cfl */
1158 BDW_DISABLE_HDC_INVALIDATION);
1162 skl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1164 gen9_gt_workarounds_init(gt, wal);
1166 /* WaDisableGafsUnitClkGating:skl */
1169 GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1171 /* WaInPlaceDecompressionHang:skl */
1172 if (IS_SKYLAKE(gt->i915) && IS_GRAPHICS_STEP(gt->i915, STEP_A0, STEP_H0))
1174 GEN9_GAMT_ECO_REG_RW_IA,
1175 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1179 kbl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1181 gen9_gt_workarounds_init(gt, wal);
1183 /* WaDisableDynamicCreditSharing:kbl */
1184 if (IS_KABYLAKE(gt->i915) && IS_GRAPHICS_STEP(gt->i915, 0, STEP_C0))
1187 GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
1189 /* WaDisableGafsUnitClkGating:kbl */
1192 GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1194 /* WaInPlaceDecompressionHang:kbl */
1196 GEN9_GAMT_ECO_REG_RW_IA,
1197 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1201 glk_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1203 gen9_gt_workarounds_init(gt, wal);
1207 cfl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1209 gen9_gt_workarounds_init(gt, wal);
1211 /* WaDisableGafsUnitClkGating:cfl */
1214 GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
1216 /* WaInPlaceDecompressionHang:cfl */
1218 GEN9_GAMT_ECO_REG_RW_IA,
1219 GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
1222 static void __set_mcr_steering(struct i915_wa_list *wal,
1223 i915_reg_t steering_reg,
1224 unsigned int slice, unsigned int subslice)
1228 mcr = GEN11_MCR_SLICE(slice) | GEN11_MCR_SUBSLICE(subslice);
1229 mcr_mask = GEN11_MCR_SLICE_MASK | GEN11_MCR_SUBSLICE_MASK;
1231 wa_write_clr_set(wal, steering_reg, mcr_mask, mcr);
1234 static void debug_dump_steering(struct intel_gt *gt)
1236 struct drm_printer p = drm_debug_printer("MCR Steering:");
1238 if (drm_debug_enabled(DRM_UT_DRIVER))
1239 intel_gt_mcr_report_steering(&p, gt, false);
1242 static void __add_mcr_wa(struct intel_gt *gt, struct i915_wa_list *wal,
1243 unsigned int slice, unsigned int subslice)
1245 __set_mcr_steering(wal, GEN8_MCR_SELECTOR, slice, subslice);
1247 gt->default_steering.groupid = slice;
1248 gt->default_steering.instanceid = subslice;
1250 debug_dump_steering(gt);
1254 icl_wa_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
1256 const struct sseu_dev_info *sseu = >->info.sseu;
1257 unsigned int subslice;
1259 GEM_BUG_ON(GRAPHICS_VER(gt->i915) < 11);
1260 GEM_BUG_ON(hweight8(sseu->slice_mask) > 1);
1263 * Although a platform may have subslices, we need to always steer
1264 * reads to the lowest instance that isn't fused off. When Render
1265 * Power Gating is enabled, grabbing forcewake will only power up a
1266 * single subslice (the "minconfig") if there isn't a real workload
1267 * that needs to be run; this means that if we steer register reads to
1268 * one of the higher subslices, we run the risk of reading back 0's or
1271 subslice = __ffs(intel_sseu_get_hsw_subslices(sseu, 0));
1274 * If the subslice we picked above also steers us to a valid L3 bank,
1275 * then we can just rely on the default steering and won't need to
1276 * worry about explicitly re-steering L3BANK reads later.
1278 if (gt->info.l3bank_mask & BIT(subslice))
1279 gt->steering_table[L3BANK] = NULL;
1281 __add_mcr_wa(gt, wal, 0, subslice);
1285 xehp_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
1287 const struct sseu_dev_info *sseu = >->info.sseu;
1288 unsigned long slice, subslice = 0, slice_mask = 0;
1293 * On Xe_HP the steering increases in complexity. There are now several
1294 * more units that require steering and we're not guaranteed to be able
1295 * to find a common setting for all of them. These are:
1296 * - GSLICE (fusable)
1297 * - DSS (sub-unit within gslice; fusable)
1298 * - L3 Bank (fusable)
1299 * - MSLICE (fusable)
1300 * - LNCF (sub-unit within mslice; always present if mslice is present)
1302 * We'll do our default/implicit steering based on GSLICE (in the
1303 * sliceid field) and DSS (in the subsliceid field). If we can
1304 * find overlap between the valid MSLICE and/or LNCF values with
1305 * a suitable GSLICE, then we can just re-use the default value and
1306 * skip any explicit steering at runtime.
1308 * We only need to look for overlap between GSLICE/MSLICE/LNCF to find
1309 * a valid sliceid value. DSS steering is the only type of steering
1310 * that utilizes the 'subsliceid' bits.
1312 * Also note that, even though the steering domain is called "GSlice"
1313 * and it is encoded in the register using the gslice format, the spec
1314 * says that the combined (geometry | compute) fuse should be used to
1315 * select the steering.
1318 /* Find the potential gslice candidates */
1319 slice_mask = intel_slicemask_from_xehp_dssmask(sseu->subslice_mask,
1320 GEN_DSS_PER_GSLICE);
1323 * Find the potential LNCF candidates. Either LNCF within a valid
1326 for_each_set_bit(i, >->info.mslice_mask, GEN12_MAX_MSLICES)
1327 lncf_mask |= (0x3 << (i * 2));
1330 * Are there any sliceid values that work for both GSLICE and LNCF
1333 if (slice_mask & lncf_mask) {
1334 slice_mask &= lncf_mask;
1335 gt->steering_table[LNCF] = NULL;
1338 /* How about sliceid values that also work for MSLICE steering? */
1339 if (slice_mask & gt->info.mslice_mask) {
1340 slice_mask &= gt->info.mslice_mask;
1341 gt->steering_table[MSLICE] = NULL;
1344 if (IS_XEHPSDV(gt->i915) && slice_mask & BIT(0))
1345 gt->steering_table[GAM] = NULL;
1347 slice = __ffs(slice_mask);
1348 subslice = intel_sseu_find_first_xehp_dss(sseu, GEN_DSS_PER_GSLICE, slice) %
1351 __add_mcr_wa(gt, wal, slice, subslice);
1354 * SQIDI ranges are special because they use different steering
1355 * registers than everything else we work with. On XeHP SDV and
1356 * DG2-G10, any value in the steering registers will work fine since
1357 * all instances are present, but DG2-G11 only has SQIDI instances at
1358 * IDs 2 and 3, so we need to steer to one of those. For simplicity
1359 * we'll just steer to a hardcoded "2" since that value will work
1362 __set_mcr_steering(wal, MCFG_MCR_SELECTOR, 0, 2);
1363 __set_mcr_steering(wal, SF_MCR_SELECTOR, 0, 2);
1366 * On DG2, GAM registers have a dedicated steering control register
1367 * and must always be programmed to a hardcoded groupid of "1."
1369 if (IS_DG2(gt->i915))
1370 __set_mcr_steering(wal, GAM_MCR_SELECTOR, 1, 0);
1374 pvc_init_mcr(struct intel_gt *gt, struct i915_wa_list *wal)
1379 * Setup implicit steering for COMPUTE and DSS ranges to the first
1380 * non-fused-off DSS. All other types of MCR registers will be
1381 * explicitly steered.
1383 dss = intel_sseu_find_first_xehp_dss(>->info.sseu, 0, 0);
1384 __add_mcr_wa(gt, wal, dss / GEN_DSS_PER_CSLICE, dss % GEN_DSS_PER_CSLICE);
1388 icl_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1390 struct drm_i915_private *i915 = gt->i915;
1392 icl_wa_init_mcr(gt, wal);
1394 /* WaModifyGamTlbPartitioning:icl */
1395 wa_write_clr_set(wal,
1396 GEN11_GACB_PERF_CTRL,
1397 GEN11_HASH_CTRL_MASK,
1398 GEN11_HASH_CTRL_BIT0 | GEN11_HASH_CTRL_BIT4);
1400 /* Wa_1405766107:icl
1401 * Formerly known as WaCL2SFHalfMaxAlloc
1405 GEN11_LSN_UNSLCVC_GAFS_HALF_SF_MAXALLOC |
1406 GEN11_LSN_UNSLCVC_GAFS_HALF_CL2_MAXALLOC);
1409 * Formerly known as WaDisCtxReload
1412 GEN8_GAMW_ECO_DEV_RW_IA,
1413 GAMW_ECO_DEV_CTX_RELOAD_DISABLE);
1415 /* Wa_1406463099:icl
1416 * Formerly known as WaGamTlbPendError
1420 GAMT_CHKN_DISABLE_L3_COH_PIPE);
1423 * Wa_1408615072:icl,ehl (vsunit)
1424 * Wa_1407596294:icl,ehl (hsunit)
1426 wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
1427 VSUNIT_CLKGATE_DIS | HSUNIT_CLKGATE_DIS);
1429 /* Wa_1407352427:icl,ehl */
1430 wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2,
1431 PSDUNIT_CLKGATE_DIS);
1433 /* Wa_1406680159:icl,ehl */
1434 wa_mcr_write_or(wal,
1435 GEN11_SUBSLICE_UNIT_LEVEL_CLKGATE,
1436 GWUNIT_CLKGATE_DIS);
1438 /* Wa_1607087056:icl,ehl,jsl */
1439 if (IS_ICELAKE(i915) ||
1440 ((IS_JASPERLAKE(i915) || IS_ELKHARTLAKE(i915)) &&
1441 IS_GRAPHICS_STEP(i915, STEP_A0, STEP_B0)))
1443 GEN11_SLICE_UNIT_LEVEL_CLKGATE,
1444 L3_CLKGATE_DIS | L3_CR2X_CLKGATE_DIS);
1447 * This is not a documented workaround, but rather an optimization
1448 * to reduce sampler power.
1450 wa_mcr_write_clr(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);
1454 * Though there are per-engine instances of these registers,
1455 * they retain their value through engine resets and should
1456 * only be provided on the GT workaround list rather than
1457 * the engine-specific workaround list.
1460 wa_14011060649(struct intel_gt *gt, struct i915_wa_list *wal)
1462 struct intel_engine_cs *engine;
1465 for_each_engine(engine, gt, id) {
1466 if (engine->class != VIDEO_DECODE_CLASS ||
1467 (engine->instance % 2))
1470 wa_write_or(wal, VDBOX_CGCTL3F10(engine->mmio_base),
1471 IECPUNIT_CLKGATE_DIS);
1476 gen12_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1478 icl_wa_init_mcr(gt, wal);
1480 /* Wa_14011060649:tgl,rkl,dg1,adl-s,adl-p */
1481 wa_14011060649(gt, wal);
1483 /* Wa_14011059788:tgl,rkl,adl-s,dg1,adl-p */
1484 wa_mcr_write_or(wal, GEN10_DFR_RATIO_EN_AND_CHICKEN, DFR_DISABLE);
1489 * Firmware on some gen12 platforms locks the MISCCPCTL register,
1490 * preventing i915 from modifying it for this workaround. Skip the
1491 * readback verification for this workaround on debug builds; if the
1492 * workaround doesn't stick due to firmware behavior, it's not an error
1493 * that we want CI to flag.
1495 wa_add(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE,
1500 dg1_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1502 gen12_gt_workarounds_init(gt, wal);
1504 /* Wa_1409420604:dg1 */
1505 wa_mcr_write_or(wal, SUBSLICE_UNIT_LEVEL_CLKGATE2,
1506 CPSSUNIT_CLKGATE_DIS);
1508 /* Wa_1408615072:dg1 */
1509 /* Empirical testing shows this register is unaffected by engine reset. */
1510 wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE2, VSUNIT_CLKGATE_DIS_TGL);
1514 xehpsdv_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1516 struct drm_i915_private *i915 = gt->i915;
1518 xehp_init_mcr(gt, wal);
1520 /* Wa_1409757795:xehpsdv */
1521 wa_mcr_write_or(wal, SCCGCTL94DC, CG3DDISURB);
1523 /* Wa_18011725039:xehpsdv */
1524 if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_B0)) {
1525 wa_mcr_masked_dis(wal, MLTICTXCTL, TDONRENDER);
1526 wa_mcr_write_or(wal, L3SQCREG1_CCS0, FLUSHALLNONCOH);
1529 /* Wa_16011155590:xehpsdv */
1530 if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A0, STEP_B0))
1531 wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
1532 TSGUNIT_CLKGATE_DIS);
1534 /* Wa_14011780169:xehpsdv */
1535 if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_B0, STEP_FOREVER)) {
1536 wa_write_or(wal, UNSLCGCTL9440, GAMTLBOACS_CLKGATE_DIS |
1537 GAMTLBVDBOX7_CLKGATE_DIS |
1538 GAMTLBVDBOX6_CLKGATE_DIS |
1539 GAMTLBVDBOX5_CLKGATE_DIS |
1540 GAMTLBVDBOX4_CLKGATE_DIS |
1541 GAMTLBVDBOX3_CLKGATE_DIS |
1542 GAMTLBVDBOX2_CLKGATE_DIS |
1543 GAMTLBVDBOX1_CLKGATE_DIS |
1544 GAMTLBVDBOX0_CLKGATE_DIS |
1545 GAMTLBKCR_CLKGATE_DIS |
1546 GAMTLBGUC_CLKGATE_DIS |
1547 GAMTLBBLT_CLKGATE_DIS);
1548 wa_write_or(wal, UNSLCGCTL9444, GAMTLBGFXA0_CLKGATE_DIS |
1549 GAMTLBGFXA1_CLKGATE_DIS |
1550 GAMTLBCOMPA0_CLKGATE_DIS |
1551 GAMTLBCOMPA1_CLKGATE_DIS |
1552 GAMTLBCOMPB0_CLKGATE_DIS |
1553 GAMTLBCOMPB1_CLKGATE_DIS |
1554 GAMTLBCOMPC0_CLKGATE_DIS |
1555 GAMTLBCOMPC1_CLKGATE_DIS |
1556 GAMTLBCOMPD0_CLKGATE_DIS |
1557 GAMTLBCOMPD1_CLKGATE_DIS |
1558 GAMTLBMERT_CLKGATE_DIS |
1559 GAMTLBVEBOX3_CLKGATE_DIS |
1560 GAMTLBVEBOX2_CLKGATE_DIS |
1561 GAMTLBVEBOX1_CLKGATE_DIS |
1562 GAMTLBVEBOX0_CLKGATE_DIS);
1565 /* Wa_16012725990:xehpsdv */
1566 if (IS_XEHPSDV_GRAPHICS_STEP(i915, STEP_A1, STEP_FOREVER))
1567 wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE, VFUNIT_CLKGATE_DIS);
1569 /* Wa_14011060649:xehpsdv */
1570 wa_14011060649(gt, wal);
1572 /* Wa_14012362059:xehpsdv */
1573 wa_mcr_write_or(wal, XEHP_MERT_MOD_CTRL, FORCE_MISS_FTLB);
1575 /* Wa_14014368820:xehpsdv */
1576 wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
1577 INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);
1579 /* Wa_14010670810:xehpsdv */
1580 wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
1584 dg2_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1586 xehp_init_mcr(gt, wal);
1588 /* Wa_14011060649:dg2 */
1589 wa_14011060649(gt, wal);
1591 if (IS_DG2_G10(gt->i915)) {
1592 /* Wa_22010523718:dg2 */
1593 wa_write_or(wal, UNSLICE_UNIT_LEVEL_CLKGATE,
1594 CG3DDISCFEG_CLKGATE_DIS);
1596 /* Wa_14011006942:dg2 */
1597 wa_mcr_write_or(wal, GEN11_SUBSLICE_UNIT_LEVEL_CLKGATE,
1598 DSS_ROUTER_CLKGATE_DIS);
1601 /* Wa_14014830051:dg2 */
1602 wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);
1606 * Skip verification for a possibly locked register.
1608 wa_add(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE,
1611 /* Wa_18018781329 */
1612 wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
1613 wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
1614 wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
1615 wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
1617 /* Wa_1509235366:dg2 */
1618 wa_mcr_write_or(wal, XEHP_GAMCNTRL_CTRL,
1619 INVALIDATION_BROADCAST_MODE_DIS | GLOBAL_INVALIDATION_MODE);
1621 /* Wa_14010648519:dg2 */
1622 wa_mcr_write_or(wal, XEHP_L3NODEARBCFG, XEHP_LNESPARE);
1626 pvc_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1628 pvc_init_mcr(gt, wal);
1630 /* Wa_14015795083 */
1631 wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
1633 /* Wa_18018781329 */
1634 wa_mcr_write_or(wal, RENDER_MOD_CTRL, FORCE_MISS_FTLB);
1635 wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
1636 wa_mcr_write_or(wal, XEHP_VDBX_MOD_CTRL, FORCE_MISS_FTLB);
1637 wa_mcr_write_or(wal, XEHP_VEBX_MOD_CTRL, FORCE_MISS_FTLB);
1639 /* Wa_16016694945 */
1640 wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_OVRLSCCC);
1644 xelpg_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1646 /* Wa_14018778641 / Wa_18018781329 */
1647 wa_mcr_write_or(wal, COMP_MOD_CTRL, FORCE_MISS_FTLB);
1649 /* Wa_22016670082 */
1650 wa_write_or(wal, GEN12_SQCNT1, GEN12_STRICT_RAR_ENABLE);
1652 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
1653 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
1654 /* Wa_14014830051 */
1655 wa_mcr_write_clr(wal, SARB_CHICKEN1, COMP_CKN_IN);
1657 /* Wa_14015795083 */
1658 wa_write_clr(wal, GEN7_MISCCPCTL, GEN12_DOP_CLOCK_GATE_RENDER_ENABLE);
1662 * Unlike older platforms, we no longer set up implicit steering here;
1663 * all MCR accesses are explicitly steered.
1665 debug_dump_steering(gt);
1669 wa_16021867713(struct intel_gt *gt, struct i915_wa_list *wal)
1671 struct intel_engine_cs *engine;
1674 for_each_engine(engine, gt, id)
1675 if (engine->class == VIDEO_DECODE_CLASS)
1676 wa_write_or(wal, VDBOX_CGCTL3F1C(engine->mmio_base),
1677 MFXPIPE_CLKGATE_DIS);
1681 xelpmp_gt_workarounds_init(struct intel_gt *gt, struct i915_wa_list *wal)
1683 wa_16021867713(gt, wal);
1689 * Note that although these registers are MCR on the primary
1690 * GT, the media GT's versions are regular singleton registers.
1692 wa_write_or(wal, XELPMP_GSC_MOD_CTRL, FORCE_MISS_FTLB);
1694 /* Wa_22016670082 */
1695 wa_write_or(wal, GEN12_SQCNT1, GEN12_STRICT_RAR_ENABLE);
1697 debug_dump_steering(gt);
1701 * The bspec performance guide has recommended MMIO tuning settings. These
1702 * aren't truly "workarounds" but we want to program them through the
1703 * workaround infrastructure to make sure they're (re)applied at the proper
1706 * The programming in this function is for settings that persist through
1707 * engine resets and also are not part of any engine's register state context.
1708 * I.e., settings that only need to be re-applied in the event of a full GT
1709 * reset.
1711 static void gt_tuning_settings(struct intel_gt *gt, struct i915_wa_list *wal)
1713 if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71))) {
1714 wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
1715 wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
1718 if (IS_PONTEVECCHIO(gt->i915)) {
1719 wa_mcr_write(wal, XEHPC_L3SCRUB,
1720 SCRUB_CL_DWNGRADE_SHARED | SCRUB_RATE_4B_PER_CLK);
1721 wa_mcr_masked_en(wal, XEHPC_LNCFMISCCFGREG0, XEHPC_HOSTCACHEEN);
1724 if (IS_DG2(gt->i915)) {
1725 wa_mcr_write_or(wal, XEHP_L3SCQREG7, BLEND_FILL_CACHING_OPT_DIS);
1726 wa_mcr_write_or(wal, XEHP_SQCM, EN_32B_ACCESS);
1731 gt_init_workarounds(struct intel_gt *gt, struct i915_wa_list *wal)
1733 struct drm_i915_private *i915 = gt->i915;
1735 gt_tuning_settings(gt, wal);
1737 if (gt->type == GT_MEDIA) {
1738 if (MEDIA_VER_FULL(i915) == IP_VER(13, 0))
1739 xelpmp_gt_workarounds_init(gt, wal);
1741 MISSING_CASE(MEDIA_VER_FULL(i915));
1746 if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)))
1747 xelpg_gt_workarounds_init(gt, wal);
1748 else if (IS_PONTEVECCHIO(i915))
1749 pvc_gt_workarounds_init(gt, wal);
1750 else if (IS_DG2(i915))
1751 dg2_gt_workarounds_init(gt, wal);
1752 else if (IS_XEHPSDV(i915))
1753 xehpsdv_gt_workarounds_init(gt, wal);
1754 else if (IS_DG1(i915))
1755 dg1_gt_workarounds_init(gt, wal);
1756 else if (GRAPHICS_VER(i915) == 12)
1757 gen12_gt_workarounds_init(gt, wal);
1758 else if (GRAPHICS_VER(i915) == 11)
1759 icl_gt_workarounds_init(gt, wal);
1760 else if (IS_COFFEELAKE(i915) || IS_COMETLAKE(i915))
1761 cfl_gt_workarounds_init(gt, wal);
1762 else if (IS_GEMINILAKE(i915))
1763 glk_gt_workarounds_init(gt, wal);
1764 else if (IS_KABYLAKE(i915))
1765 kbl_gt_workarounds_init(gt, wal);
1766 else if (IS_BROXTON(i915))
1767 gen9_gt_workarounds_init(gt, wal);
1768 else if (IS_SKYLAKE(i915))
1769 skl_gt_workarounds_init(gt, wal);
1770 else if (IS_HASWELL(i915))
1771 hsw_gt_workarounds_init(gt, wal);
1772 else if (IS_VALLEYVIEW(i915))
1773 vlv_gt_workarounds_init(gt, wal);
1774 else if (IS_IVYBRIDGE(i915))
1775 ivb_gt_workarounds_init(gt, wal);
1776 else if (GRAPHICS_VER(i915) == 6)
1777 snb_gt_workarounds_init(gt, wal);
1778 else if (GRAPHICS_VER(i915) == 5)
1779 ilk_gt_workarounds_init(gt, wal);
1780 else if (IS_G4X(i915))
1781 g4x_gt_workarounds_init(gt, wal);
1782 else if (GRAPHICS_VER(i915) == 4)
1783 gen4_gt_workarounds_init(gt, wal);
1784 else if (GRAPHICS_VER(i915) <= 8)
1787 MISSING_CASE(GRAPHICS_VER(i915));
1790 void intel_gt_init_workarounds(struct intel_gt *gt)
1792 struct i915_wa_list *wal = >->wa_list;
1794 wa_init_start(wal, gt, "GT", "global");
1795 gt_init_workarounds(gt, wal);
1796 wa_init_finish(wal);
1800 wa_verify(struct intel_gt *gt, const struct i915_wa *wa, u32 cur,
1801 const char *name, const char *from)
1803 if ((cur ^ wa->set) & wa->read) {
1805 "%s workaround lost on %s! (reg[%x]=0x%x, relevant bits were 0x%x vs expected 0x%x)\n",
1806 name, from, i915_mmio_reg_offset(wa->reg),
1807 cur, cur & wa->read, wa->set & wa->read);
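/*
 * Worked example for wa_verify() above: with wa->set == 0x8,
 * wa->read == 0xc and a current value of 0x4, (cur ^ wa->set) & wa->read
 * evaluates to 0xc, i.e. both readable bits differ from the expected
 * value and the workaround is reported as lost.
 */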
1815 static void wa_list_apply(const struct i915_wa_list *wal)
1817 struct intel_gt *gt = wal->gt;
1818 struct intel_uncore *uncore = gt->uncore;
1819 enum forcewake_domains fw;
1820 unsigned long flags;
1827 fw = wal_get_fw_for_rmw(uncore, wal);
1829 intel_gt_mcr_lock(gt, &flags);
1830 spin_lock(&uncore->lock);
1831 intel_uncore_forcewake_get__locked(uncore, fw);
1833 for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
1836 /* open-coded rmw due to steering */
1839 intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
1840 intel_uncore_read_fw(uncore, wa->reg);
1841 val = (old & ~wa->clr) | wa->set;
1842 if (val != old || !wa->clr) {
1844 intel_gt_mcr_multicast_write_fw(gt, wa->mcr_reg, val);
1846 intel_uncore_write_fw(uncore, wa->reg, val);
1849 if (IS_ENABLED(CONFIG_DRM_I915_DEBUG_GEM)) {
1850 u32 val = wa->is_mcr ?
1851 intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
1852 intel_uncore_read_fw(uncore, wa->reg);
1854 wa_verify(gt, wa, val, wal->name, "application");
1858 intel_uncore_forcewake_put__locked(uncore, fw);
1859 spin_unlock(&uncore->lock);
1860 intel_gt_mcr_unlock(gt, flags);
1863 void intel_gt_apply_workarounds(struct intel_gt *gt)
1865 wa_list_apply(>->wa_list);
1868 static bool wa_list_verify(struct intel_gt *gt,
1869 const struct i915_wa_list *wal,
1872 struct intel_uncore *uncore = gt->uncore;
1874 enum forcewake_domains fw;
1875 unsigned long flags;
1879 fw = wal_get_fw_for_rmw(uncore, wal);
1881 intel_gt_mcr_lock(gt, &flags);
1882 spin_lock(&uncore->lock);
1883 intel_uncore_forcewake_get__locked(uncore, fw);
1885 for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
1886 ok &= wa_verify(wal->gt, wa, wa->is_mcr ?
1887 intel_gt_mcr_read_any_fw(gt, wa->mcr_reg) :
1888 intel_uncore_read_fw(uncore, wa->reg),
1891 intel_uncore_forcewake_put__locked(uncore, fw);
1892 spin_unlock(&uncore->lock);
1893 intel_gt_mcr_unlock(gt, flags);
1898 bool intel_gt_verify_workarounds(struct intel_gt *gt, const char *from)
1900 return wa_list_verify(gt, >->wa_list, from);
1904 static bool is_nonpriv_flags_valid(u32 flags)
1906 /* Check only valid flag bits are set */
1907 if (flags & ~RING_FORCE_TO_NONPRIV_MASK_VALID)
1910 /* NB: Only 3 out of 4 enum values are valid for access field */
1911 if ((flags & RING_FORCE_TO_NONPRIV_ACCESS_MASK) ==
1912 RING_FORCE_TO_NONPRIV_ACCESS_INVALID)
1919 whitelist_reg_ext(struct i915_wa_list *wal, i915_reg_t reg, u32 flags)
1921 struct i915_wa wa = {
1925 if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
1928 if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
1931 wa.reg.reg |= flags;
1936 whitelist_mcr_reg_ext(struct i915_wa_list *wal, i915_mcr_reg_t reg, u32 flags)
1938 struct i915_wa wa = {
1943 if (GEM_DEBUG_WARN_ON(wal->count >= RING_MAX_NONPRIV_SLOTS))
1946 if (GEM_DEBUG_WARN_ON(!is_nonpriv_flags_valid(flags)))
1949 wa.mcr_reg.reg |= flags;
1954 whitelist_reg(struct i915_wa_list *wal, i915_reg_t reg)
1956 whitelist_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
1960 whitelist_mcr_reg(struct i915_wa_list *wal, i915_mcr_reg_t reg)
1962 whitelist_mcr_reg_ext(wal, reg, RING_FORCE_TO_NONPRIV_ACCESS_RW);
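/*
 * Note on the helpers above: the access flags are OR'd into the low
 * bits of the register offset (wa.reg.reg |= flags), so a single u32
 * carries both the whitelisted address and its access mode when it is
 * later written into a RING_FORCE_TO_NONPRIV slot. E.g.
 *
 *	whitelist_reg_ext(w, PS_INVOCATION_COUNT,
 *			  RING_FORCE_TO_NONPRIV_ACCESS_RD |
 *			  RING_FORCE_TO_NONPRIV_RANGE_4);
 *
 * whitelists a read-only range of 4 consecutive registers.
 */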
1965 static void gen9_whitelist_build(struct i915_wa_list *w)
1967 /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt,glk,cfl */
1968 whitelist_reg(w, GEN9_CTX_PREEMPT_REG);
1970 /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl,cfl,[cnl] */
1971 whitelist_reg(w, GEN8_CS_CHICKEN1);
1973 /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl,glk,cfl */
1974 whitelist_reg(w, GEN8_HDC_CHICKEN1);
1976 /* WaSendPushConstantsFromMMIO:skl,bxt */
1977 whitelist_reg(w, COMMON_SLICE_CHICKEN2);
1980 static void skl_whitelist_build(struct intel_engine_cs *engine)
1982 struct i915_wa_list *w = &engine->whitelist;
1984 if (engine->class != RENDER_CLASS)
1987 gen9_whitelist_build(w);
1989 /* WaDisableLSQCROPERFforOCL:skl */
1990 whitelist_mcr_reg(w, GEN8_L3SQCREG4);
1993 static void bxt_whitelist_build(struct intel_engine_cs *engine)
1995 if (engine->class != RENDER_CLASS)
1998 gen9_whitelist_build(&engine->whitelist);
2001 static void kbl_whitelist_build(struct intel_engine_cs *engine)
2003 struct i915_wa_list *w = &engine->whitelist;
2005 if (engine->class != RENDER_CLASS)
2008 gen9_whitelist_build(w);
2010 /* WaDisableLSQCROPERFforOCL:kbl */
2011 whitelist_mcr_reg(w, GEN8_L3SQCREG4);
2014 static void glk_whitelist_build(struct intel_engine_cs *engine)
2016 struct i915_wa_list *w = &engine->whitelist;
2018 if (engine->class != RENDER_CLASS)
2021 gen9_whitelist_build(w);
2023 /* WA #0862: Userspace has to set "Barrier Mode" to avoid hangs. */
2024 whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
2027 static void cfl_whitelist_build(struct intel_engine_cs *engine)
2029 struct i915_wa_list *w = &engine->whitelist;
2031 if (engine->class != RENDER_CLASS)
2034 gen9_whitelist_build(w);
2037 * WaAllowPMDepthAndInvocationCountAccessFromUMD:cfl,whl,cml,aml
2039 * This covers 4 registers which are next to one another:
2040 * - PS_INVOCATION_COUNT
2041 * - PS_INVOCATION_COUNT_UDW
2042 * - PS_DEPTH_COUNT
2043 * - PS_DEPTH_COUNT_UDW
2045 whitelist_reg_ext(w, PS_INVOCATION_COUNT,
2046 RING_FORCE_TO_NONPRIV_ACCESS_RD |
2047 RING_FORCE_TO_NONPRIV_RANGE_4);
2050 static void allow_read_ctx_timestamp(struct intel_engine_cs *engine)
2052 struct i915_wa_list *w = &engine->whitelist;
2054 if (engine->class != RENDER_CLASS)
2055 whitelist_reg_ext(w,
2056 RING_CTX_TIMESTAMP(engine->mmio_base),
2057 RING_FORCE_TO_NONPRIV_ACCESS_RD);
2060 static void cml_whitelist_build(struct intel_engine_cs *engine)
2062 allow_read_ctx_timestamp(engine);
2064 cfl_whitelist_build(engine);
2067 static void icl_whitelist_build(struct intel_engine_cs *engine)
2069 struct i915_wa_list *w = &engine->whitelist;
2071 allow_read_ctx_timestamp(engine);
2073 switch (engine->class) {
2075 /* WaAllowUMDToModifyHalfSliceChicken7:icl */
2076 whitelist_mcr_reg(w, GEN9_HALF_SLICE_CHICKEN7);
2078 /* WaAllowUMDToModifySamplerMode:icl */
2079 whitelist_mcr_reg(w, GEN10_SAMPLER_MODE);
2081 /* WaEnableStateCacheRedirectToCS:icl */
2082 whitelist_reg(w, GEN9_SLICE_COMMON_ECO_CHICKEN1);
2085 * WaAllowPMDepthAndInvocationCountAccessFromUMD:icl
2087 * This covers 4 registers which are next to one another:
2088 * - PS_INVOCATION_COUNT
2089 * - PS_INVOCATION_COUNT_UDW
2090 * - PS_DEPTH_COUNT
2091 * - PS_DEPTH_COUNT_UDW
2093 whitelist_reg_ext(w, PS_INVOCATION_COUNT,
2094 RING_FORCE_TO_NONPRIV_ACCESS_RD |
2095 RING_FORCE_TO_NONPRIV_RANGE_4);
2098 case VIDEO_DECODE_CLASS:
2099 /* hucStatusRegOffset */
2100 whitelist_reg_ext(w, _MMIO(0x2000 + engine->mmio_base),
2101 RING_FORCE_TO_NONPRIV_ACCESS_RD);
2102 /* hucUKernelHdrInfoRegOffset */
2103 whitelist_reg_ext(w, _MMIO(0x2014 + engine->mmio_base),
2104 RING_FORCE_TO_NONPRIV_ACCESS_RD);
2105 /* hucStatus2RegOffset */
2106 whitelist_reg_ext(w, _MMIO(0x23B0 + engine->mmio_base),
2107 RING_FORCE_TO_NONPRIV_ACCESS_RD);
2115 static void tgl_whitelist_build(struct intel_engine_cs *engine)
2117 struct i915_wa_list *w = &engine->whitelist;
2119 allow_read_ctx_timestamp(engine);
2121 switch (engine->class) {
2124 * WaAllowPMDepthAndInvocationCountAccessFromUMD:tgl
2127 * This covers 4 registers which are next to one another:
2128 * - PS_INVOCATION_COUNT
2129 * - PS_INVOCATION_COUNT_UDW
2130 * - PS_DEPTH_COUNT
2131 * - PS_DEPTH_COUNT_UDW
2133 whitelist_reg_ext(w, PS_INVOCATION_COUNT,
2134 RING_FORCE_TO_NONPRIV_ACCESS_RD |
2135 RING_FORCE_TO_NONPRIV_RANGE_4);
2139 * Wa_14012131227:dg1
2140 * Wa_1508744258:tgl,rkl,dg1,adl-s,adl-p
2142 whitelist_reg(w, GEN7_COMMON_SLICE_CHICKEN1);
2144 /* Wa_1806527549:tgl */
2145 whitelist_reg(w, HIZ_CHICKEN);
2147 /* Required by recommended tuning setting (not a workaround) */
2148 whitelist_reg(w, GEN11_COMMON_SLICE_CHICKEN3);
2156 static void dg2_whitelist_build(struct intel_engine_cs *engine)
2158 struct i915_wa_list *w = &engine->whitelist;
2160 switch (engine->class) {
2162 /* Required by recommended tuning setting (not a workaround) */
2163 whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
2171 static void blacklist_trtt(struct intel_engine_cs *engine)
2173 struct i915_wa_list *w = &engine->whitelist;
2176 * Prevent read/write access to [0x4400, 0x4600) which covers
2177 * the TRTT range across all engines. Note that normally userspace
2178 * cannot access the other engines' TRTT control, but for simplicity
2179 * we cover the entire range on each engine.
2181 whitelist_reg_ext(w, _MMIO(0x4400),
2182 RING_FORCE_TO_NONPRIV_DENY |
2183 RING_FORCE_TO_NONPRIV_RANGE_64);
2184 whitelist_reg_ext(w, _MMIO(0x4500),
2185 RING_FORCE_TO_NONPRIV_DENY |
2186 RING_FORCE_TO_NONPRIV_RANGE_64);
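/*
 * Editor's note on the arithmetic: a RANGE_64 entry covers 64 consecutive
 * dword registers, i.e. 64 * 4 = 0x100 bytes. The entry at 0x4400 therefore
 * spans [0x4400, 0x4500) and the entry at 0x4500 spans [0x4500, 0x4600),
 * together denying exactly the [0x4400, 0x4600) TRTT range quoted above.
 */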
2189 static void pvc_whitelist_build(struct intel_engine_cs *engine)
2191 /* Wa_16014440446:pvc */
2192 blacklist_trtt(engine);
2195 static void xelpg_whitelist_build(struct intel_engine_cs *engine)
2197 struct i915_wa_list *w = &engine->whitelist;
2199 switch (engine->class) {
2201 /* Required by recommended tuning setting (not a workaround) */
2202 whitelist_mcr_reg(w, XEHP_COMMON_SLICE_CHICKEN3);
2210 void intel_engine_init_whitelist(struct intel_engine_cs *engine)
2212 struct drm_i915_private *i915 = engine->i915;
2213 struct i915_wa_list *w = &engine->whitelist;
2215 wa_init_start(w, engine->gt, "whitelist", engine->name);
2217 if (engine->gt->type == GT_MEDIA)
2219 else if (IS_GFX_GT_IP_RANGE(engine->gt, IP_VER(12, 70), IP_VER(12, 71)))
2220 xelpg_whitelist_build(engine);
2221 else if (IS_PONTEVECCHIO(i915))
2222 pvc_whitelist_build(engine);
2223 else if (IS_DG2(i915))
2224 dg2_whitelist_build(engine);
2225 else if (IS_XEHPSDV(i915))
2227 else if (GRAPHICS_VER(i915) == 12)
2228 tgl_whitelist_build(engine);
2229 else if (GRAPHICS_VER(i915) == 11)
2230 icl_whitelist_build(engine);
2231 else if (IS_COMETLAKE(i915))
2232 cml_whitelist_build(engine);
2233 else if (IS_COFFEELAKE(i915))
2234 cfl_whitelist_build(engine);
2235 else if (IS_GEMINILAKE(i915))
2236 glk_whitelist_build(engine);
2237 else if (IS_KABYLAKE(i915))
2238 kbl_whitelist_build(engine);
2239 else if (IS_BROXTON(i915))
2240 bxt_whitelist_build(engine);
2241 else if (IS_SKYLAKE(i915))
2242 skl_whitelist_build(engine);
2243 else if (GRAPHICS_VER(i915) <= 8)
2244 ; /* no whitelist needed on these older platforms */
2245 else
2246 MISSING_CASE(GRAPHICS_VER(i915));
2251 void intel_engine_apply_whitelist(struct intel_engine_cs *engine)
2253 const struct i915_wa_list *wal = &engine->whitelist;
2254 struct intel_uncore *uncore = engine->uncore;
2255 const u32 base = engine->mmio_base;
2262 for (i = 0, wa = wal->list; i < wal->count; i++, wa++)
2263 intel_uncore_write(uncore,
2264 RING_FORCE_TO_NONPRIV(base, i),
2265 i915_mmio_reg_offset(wa->reg));
2267 /* And clear the rest just in case of garbage */
2268 for (; i < RING_MAX_NONPRIV_SLOTS; i++)
2269 intel_uncore_write(uncore,
2270 RING_FORCE_TO_NONPRIV(base, i),
2271 i915_mmio_reg_offset(RING_NOPID(base)));
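/*
 * Editor's note: a sketch under the assumption (not taken from this file)
 * that RING_FORCE_TO_NONPRIV(base, i) resolves to base + 0x4d0 + 4 * i;
 * the loops above then program a small fixed bank of per-engine slots.
 */
static inline u32 nonpriv_slot_offset_sketch(u32 mmio_base, unsigned int slot)
{
	return mmio_base + 0x4d0 + slot * 4; /* assumed slot layout */
}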
2275 * engine_fake_wa_init(), a placeholder to program the registers
2276 * which are not part of an official workaround defined by the
2277 * hardware specification. Adding the programming of those registers
2278 * inside a workaround allows utilizing the wa framework for proper
2279 * application and verification.
2282 engine_fake_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2287 * RING_CMD_CCTL specifies the default MOCS entry that will be used
2288 * by the command streamer when executing commands that don't have
2289 * a way to explicitly specify a MOCS setting. The default should
2290 * usually reference whichever MOCS entry corresponds to uncached
2291 * behavior, although use of a WB cached entry is recommended by the
2292 * spec in certain circumstances on specific platforms.
2294 if (GRAPHICS_VER(engine->i915) >= 12) {
2295 mocs_r = engine->gt->mocs.uc_index;
2296 mocs_w = engine->gt->mocs.uc_index;
2298 if (HAS_L3_CCS_READ(engine->i915) &&
2299 engine->class == COMPUTE_CLASS) {
2300 mocs_r = engine->gt->mocs.wb_index;
2303 * Even on the few platforms where MOCS 0 is a
2304 * legitimate table entry, it's never the correct
2305 * setting to use here; we can assume the MOCS init
2306 * just forgot to initialize wb_index.
2308 drm_WARN_ON(&engine->i915->drm, mocs_r == 0);
2311 wa_masked_field_set(wal,
2312 RING_CMD_CCTL(engine->mmio_base),
2314 CMD_CCTL_MOCS_OVERRIDE(mocs_w, mocs_r));
2319 rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2321 struct drm_i915_private *i915 = engine->i915;
2322 struct intel_gt *gt = engine->gt;
2324 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2325 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0)) {
2326 /* Wa_22014600077 */
2327 wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
2328 ENABLE_EU_COUNT_FOR_TDL_FLUSH);
2331 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2332 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
2335 wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
2336 SC_DISABLE_POWER_OPTIMIZATION_EBB);
2339 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2341 /* Wa_22012856258 */
2342 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
2343 GEN12_DISABLE_READ_SUPPRESSION);
2348 * Wa_22010960976:dg2
2349 * Wa_14013347512:dg2
2351 wa_mcr_masked_dis(wal, XEHP_HDC_CHICKEN0,
2352 LSC_L1_FLUSH_CTL_3D_DATAPORT_FLUSH_EVENTS_MASK);
2355 if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)) ||
2357 /* Wa_14015150844 */
2358 wa_mcr_add(wal, XEHP_HDC_CHICKEN0, 0,
2359 _MASKED_BIT_ENABLE(DIS_ATOMIC_CHAINING_TYPED_WRITES),
2363 if (IS_DG2(i915) || IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
2364 IS_DG1(i915) || IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
2366 * Wa_1606700617:tgl,dg1,adl-p
2367 * Wa_22010271021:tgl,rkl,dg1,adl-s,adl-p
2368 * Wa_14010826681:tgl,dg1,rkl,adl-p
2369 * Wa_18019627453:dg2
2372 GEN9_CS_DEBUG_MODE1,
2373 FF_DOP_CLOCK_GATE_DISABLE);
2376 if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) || IS_DG1(i915) ||
2377 IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
2378 /* Wa_1606931601:tgl,rkl,dg1,adl-s,adl-p */
2379 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2, GEN12_DISABLE_EARLY_READ);
2382 * Wa_1407928979:tgl A*
2383 * Wa_18011464164:tgl[B0+],dg1[B0+]
2384 * Wa_22010931296:tgl[B0+],dg1[B0+]
2385 * Wa_14010919138:rkl,dg1,adl-s,adl-p
2387 wa_write_or(wal, GEN7_FF_THREAD_MODE,
2388 GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
2390 /* Wa_1406941453:tgl,rkl,dg1,adl-s,adl-p */
2391 wa_mcr_masked_en(wal,
2396 if (IS_ALDERLAKE_P(i915) || IS_ALDERLAKE_S(i915) ||
2397 IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915)) {
2399 wa_mcr_masked_en(wal, GEN8_ROW_CHICKEN2,
2400 GEN12_PUSH_CONST_DEREF_HOLD_DIS);
2402 /* Wa_14010229206 */
2403 wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, GEN12_DISABLE_TDL_PUSH);
2406 if (IS_ROCKETLAKE(i915) || IS_TIGERLAKE(i915) || IS_ALDERLAKE_P(i915)) {
2410 * On TGL and RKL there are multiple entries for this WA in the
2411 * BSpec; some indicate this is an A0-only WA, others indicate
2412 * it applies to all steppings so we trust the "all steppings."
2415 RING_PSMI_CTL(RENDER_RING_BASE),
2416 GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE |
2417 GEN8_RC_SEMA_IDLE_MSG_DISABLE);
2420 if (GRAPHICS_VER(i915) == 11) {
2421 /* This is not a Wa. Enable for better image quality */
2424 _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE);
2428 * Formerly known as WaGAPZPriorityScheme
2432 GEN11_ARBITRATION_PRIO_ORDER_MASK);
2436 * Formerly known as WaL3BankAddressHashing
2438 wa_write_clr_set(wal,
2440 GEN11_HASH_CTRL_EXCL_MASK,
2441 GEN11_HASH_CTRL_EXCL_BIT0);
2442 wa_write_clr_set(wal,
2444 GEN11_BANK_HASH_ADDR_EXCL_MASK,
2445 GEN11_BANK_HASH_ADDR_EXCL_BIT0);
2449 * Formerly known as WaDisableCleanEvicts
2451 wa_mcr_write_or(wal,
2453 GEN11_LQSC_CLEAN_EVICT_DISABLE);
2455 /* Wa_1606682166:icl */
2458 GEN7_DISABLE_SAMPLER_PREFETCH);
2460 /* Wa_1409178092:icl */
2461 wa_mcr_write_clr_set(wal,
2463 GEN11_COHERENT_PARTIAL_WRITE_MERGE_ENABLE,
2466 /* WaEnable32PlaneMode:icl */
2467 wa_masked_en(wal, GEN9_CSFE_CHICKEN1_RCS,
2468 GEN11_ENABLE_32_PLANE_MODE);
2471 * Wa_1408767742:icl[a2..forever],ehl[all]
2472 * Wa_1605460711:icl[a0..c0]
2475 GEN7_FF_THREAD_MODE,
2476 GEN12_FF_TESSELATION_DOP_GATE_DISABLE);
2478 /* Wa_22010271021 */
2480 GEN9_CS_DEBUG_MODE1,
2481 FF_DOP_CLOCK_GATE_DISABLE);
2485 * Intel platforms that support fine-grained preemption (i.e., gen9 and
2486 * beyond) allow the kernel-mode driver to choose between two different
2487 * options for controlling preemption granularity and behavior.
2489 * Option 1 (hardware default):
2490 * Preemption settings are controlled in a global manner via
2491 * kernel-only register CS_DEBUG_MODE1 (0x20EC). Any granularity
2492 * and settings chosen by the kernel-mode driver will apply to all
2493 * userspace clients.
2495 * Option 2 (i915 default):
2496 * Preemption settings are controlled on a per-context basis via
2497 * register CS_CHICKEN1 (0x2580). CS_CHICKEN1 is saved/restored on
2498 * context switch and is writable by userspace (e.g., via
2499 * MI_LOAD_REGISTER_IMMEDIATE instructions placed in a batch buffer)
2500 * which allows different userspace drivers/clients to select
2501 * different settings, or to change those settings on the fly in
2502 * response to runtime needs. This option was known by name
2503 * "FtrPerCtxtPreemptionGranularityControl" at one time, although
2504 * that name is somewhat misleading as other non-granularity
2505 * preemption settings are also impacted by this decision.
2507 * On Linux, our policy has always been to let userspace drivers
2508 * control preemption granularity/settings (Option 2). This was
2509 * originally mandatory on gen9 to prevent ABI breakage (old gen9
2510 * userspace developed before object-level preemption was enabled would
2511 * not behave well if i915 were to go with Option 1 and enable that
2512 * preemption in a global manner). On gen9 each context would have
2513 * object-level preemption disabled by default (see
2514 * WaDisable3DMidCmdPreemption in gen9_ctx_workarounds_init), but
2515 * userspace drivers could opt-in to object-level preemption as they
2516 * saw fit. For post-gen9 platforms, we continue to utilize Option 2;
2517 * even though it is no longer necessary for ABI compatibility when
2518 * enabling a new platform, it does ensure that userspace will be able
2519 * to implement any workarounds that show up requiring temporary
2520 * adjustments to preemption behavior at runtime.
2522 * Notes/Workarounds:
2523 * - Wa_14015141709: On DG2 and early steppings of MTL,
2524 * CS_CHICKEN1[0] does not disable object-level preemption as
2525 * it is supposed to (nor does CS_DEBUG_MODE1[0] if we had been
2526 * using Option 1). Effectively this means userspace is unable
2527 * to disable object-level preemption on these platforms/steppings
2528 * despite the setting here.
2530 * - Wa_16013994831: May require that userspace program
2531 * CS_CHICKEN1[10] when certain runtime conditions are true.
2532 * Userspace requires Option 2 to be in effect for their update of
2533 * CS_CHICKEN1[10] to be effective.
2535 * Other workarounds may appear in the future that will also require
2536 * Option 2 behavior to allow proper userspace implementation.
2538 if (GRAPHICS_VER(i915) >= 9)
2540 GEN7_FF_SLICE_CS_CHICKEN1,
2541 GEN9_FFSC_PERCTX_PREEMPT_CTRL);
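/*
 * Editor's note: an illustrative sketch of Option 2 above, not part of the
 * driver. With per-context control in effect, a userspace driver could
 * retune CS_CHICKEN1 (0x2580, per the comment above) from its own batch
 * buffer, roughly:
 *
 *	*cs++ = MI_LOAD_REGISTER_IMM(1);	// LRI of a single register
 *	*cs++ = 0x2580;				// CS_CHICKEN1 offset
 *	*cs++ = value;				// masked-register value
 */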
2543 if (IS_SKYLAKE(i915) ||
2544 IS_KABYLAKE(i915) ||
2545 IS_COFFEELAKE(i915) ||
2546 IS_COMETLAKE(i915)) {
2547 /* WaEnableGapsTsvCreditFix:skl,kbl,cfl */
2550 GEN9_GAPS_TSV_CREDIT_DISABLE);
2553 if (IS_BROXTON(i915)) {
2554 /* WaDisablePooledEuLoadBalancingFix:bxt */
2556 FF_SLICE_CS_CHICKEN2,
2557 GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
2560 if (GRAPHICS_VER(i915) == 9) {
2561 /* WaContextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl,glk,cfl */
2563 GEN9_CSFE_CHICKEN1_RCS,
2564 GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE);
2566 /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl,glk,cfl */
2567 wa_mcr_write_or(wal,
2569 GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
2571 /* WaProgramL3SqcReg1DefaultForPerf:bxt,glk */
2572 if (IS_GEN9_LP(i915))
2573 wa_mcr_write_clr_set(wal,
2575 L3_PRIO_CREDITS_MASK,
2576 L3_GENERAL_PRIO_CREDITS(62) |
2577 L3_HIGH_PRIO_CREDITS(2));
2579 /* WaOCLCoherentLineFlush:skl,bxt,kbl,cfl */
2580 wa_mcr_write_or(wal,
2582 GEN8_LQSC_FLUSH_COHERENT_LINES);
2584 /* Disable atomics in L3 to prevent unrecoverable hangs */
2585 wa_write_clr_set(wal, GEN9_SCRATCH_LNCF1,
2586 GEN9_LNCF_NONIA_COHERENT_ATOMICS_ENABLE, 0);
2587 wa_mcr_write_clr_set(wal, GEN8_L3SQCREG4,
2588 GEN8_LQSQ_NONIA_COHERENT_ATOMICS_ENABLE, 0);
2589 wa_mcr_write_clr_set(wal, GEN9_SCRATCH1,
2590 EVICTION_PERF_FIX_ENABLE, 0);
2593 if (IS_HASWELL(i915)) {
2594 /* WaSampleCChickenBitEnable:hsw */
2596 HSW_HALF_SLICE_CHICKEN3, HSW_SAMPLE_C_PERFORMANCE);
2600 /* enable HiZ Raw Stall Optimization */
2601 HIZ_RAW_STALL_OPT_DISABLE);
2604 if (IS_VALLEYVIEW(i915)) {
2605 /* WaDisableEarlyCull:vlv */
2608 _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
2611 * WaVSThreadDispatchOverride:ivb,vlv
2613 * This actually overrides the dispatch
2614 * mode for all thread types.
2616 wa_write_clr_set(wal,
2617 GEN7_FF_THREAD_MODE,
2619 GEN7_FF_TS_SCHED_HW |
2620 GEN7_FF_VS_SCHED_HW |
2621 GEN7_FF_DS_SCHED_HW);
2623 /* WaPsdDispatchEnable:vlv */
2624 /* WaDisablePSDDualDispatchEnable:vlv */
2626 GEN7_HALF_SLICE_CHICKEN1,
2627 GEN7_MAX_PS_THREAD_DEP |
2628 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
2631 if (IS_IVYBRIDGE(i915)) {
2632 /* WaDisableEarlyCull:ivb */
2635 _3D_CHICKEN_SF_DISABLE_OBJEND_CULL);
2637 if (0) { /* causes HiZ corruption on ivb:gt1 */
2638 /* enable HiZ Raw Stall Optimization */
2641 HIZ_RAW_STALL_OPT_DISABLE);
2645 * WaVSThreadDispatchOverride:ivb,vlv
2647 * This actually overrides the dispatch
2648 * mode for all thread types.
2650 wa_write_clr_set(wal,
2651 GEN7_FF_THREAD_MODE,
2653 GEN7_FF_TS_SCHED_HW |
2654 GEN7_FF_VS_SCHED_HW |
2655 GEN7_FF_DS_SCHED_HW);
2657 /* WaDisablePSDDualDispatchEnable:ivb */
2658 if (IS_IVB_GT1(i915))
2660 GEN7_HALF_SLICE_CHICKEN1,
2661 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
2664 if (GRAPHICS_VER(i915) == 7) {
2665 /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */
2667 RING_MODE_GEN7(RENDER_RING_BASE),
2668 GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE);
2670 /* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */
2671 wa_masked_dis(wal, CACHE_MODE_0_GEN7, RC_OP_FLUSH_ENABLE);
2674 * BSpec says this must be set, even though
2675 * WaDisable4x2SubspanOptimization:ivb,hsw
2676 * isn't listed for VLV.
2680 PIXEL_SUBSPAN_COLLECT_OPT_DISABLE);
2683 * BSpec recommends 8x4 when MSAA is used,
2684 * however in practice 16x4 seems fastest.
2686 * Note that PS/WM thread counts depend on the WIZ hashing
2687 * disable bit, which we don't touch here, but it's good
2688 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
2690 wa_masked_field_set(wal,
2692 GEN6_WIZ_HASHING_MASK,
2693 GEN6_WIZ_HASHING_16x4);
2696 if (IS_GRAPHICS_VER(i915, 6, 7))
2698 * We need to disable the AsyncFlip performance optimisations in
2699 * order to use MI_WAIT_FOR_EVENT within the CS. It should
2700 * already be programmed to '1' on all products.
2702 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
2705 RING_MI_MODE(RENDER_RING_BASE),
2706 ASYNC_FLIP_PERF_DISABLE);
2708 if (GRAPHICS_VER(i915) == 6) {
2710 * Required for the hardware to program scanline values for
2711 * waiting.
2712 * WaEnableFlushTlbInvalidationMode:snb
2716 GFX_TLB_INVALIDATE_EXPLICIT);
2718 /* WaDisableHiZPlanesWhenMSAAEnabled:snb */
2721 _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB);
2725 /* WaStripsFansDisableFastClipPerformanceFix:snb */
2726 _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL |
2729 * "This bit must be set if 3DSTATE_CLIP clip mode is set
2730 * to normal and 3DSTATE_SF number of SF output attributes
2731 * is set to 16."
2733 _3D_CHICKEN3_SF_DISABLE_PIPELINED_ATTR_FETCH);
2736 * BSpec recommends 8x4 when MSAA is used,
2737 * however in practice 16x4 seems fastest.
2739 * Note that PS/WM thread counts depend on the WIZ hashing
2740 * disable bit, which we don't touch here, but it's good
2741 * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
2743 wa_masked_field_set(wal,
2745 GEN6_WIZ_HASHING_MASK,
2746 GEN6_WIZ_HASHING_16x4);
2748 /* WaDisable_RenderCache_OperationalFlush:snb */
2749 wa_masked_dis(wal, CACHE_MODE_0, RC_OP_FLUSH_ENABLE);
2752 * From the Sandybridge PRM, volume 1 part 3, page 24:
2753 * "If this bit is set, STCunit will have LRA as replacement
2754 * policy. [...] This bit must be reset. LRA replacement
2755 * policy is not supported."
2759 CM0_STC_EVICT_DISABLE_LRA_SNB);
2762 if (IS_GRAPHICS_VER(i915, 4, 6))
2763 /* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
2764 wa_add(wal, RING_MI_MODE(RENDER_RING_BASE),
2765 0, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH),
2766 /* XXX bit doesn't stick on Broadwater */
2767 IS_I965G(i915) ? 0 : VS_TIMER_DISPATCH, true);
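/*
 * Editor's note, not part of the driver: _MASKED_BIT_ENABLE() targets
 * "masked" registers, which take a write-enable mask in their upper 16
 * bits so a single write can set chosen bits without a read-modify-write;
 * in effect:
 *
 *	_MASKED_BIT_ENABLE(bit) == (bit << 16) | bit
 *
 * The read mask passed to wa_add() above is separate: it excludes bits
 * (like VS_TIMER_DISPATCH on Broadwater) that cannot be read back.
 */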
2769 if (GRAPHICS_VER(i915) == 4)
2771 * Disable CONSTANT_BUFFER before it is loaded from the context
2772 * image. Once it is loaded, it is executed and the stored
2773 * address may no longer be valid, leading to a GPU hang.
2775 * This imposes the requirement that userspace reload their
2776 * CONSTANT_BUFFER on every batch, fortunately a requirement
2777 * they are already accustomed to from before contexts were
2778 * enabled.
2780 wa_add(wal, ECOSKPD(RENDER_RING_BASE),
2781 0, _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE),
2782 0 /* XXX bit doesn't stick on Broadwater */,
2787 xcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2789 struct drm_i915_private *i915 = engine->i915;
2791 /* WaKBLVECSSemaphoreWaitPoll:kbl */
2792 if (IS_KABYLAKE(i915) && IS_GRAPHICS_STEP(i915, STEP_A0, STEP_F0)) {
2794 RING_SEMA_WAIT_POLL(engine->mmio_base),
2797 /* Wa_16018031267, Wa_16018063123 */
2798 if (NEEDS_FASTCOLOR_BLT_WABB(engine))
2799 wa_masked_field_set(wal, ECOSKPD(engine->mmio_base),
2800 XEHP_BLITTER_SCHEDULING_MODE_MASK,
2801 XEHP_BLITTER_ROUND_ROBIN_MODE);
2805 ccs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2807 if (IS_PVC_CT_STEP(engine->i915, STEP_A0, STEP_C0)) {
2808 /* Wa_14014999345:pvc */
2809 wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS, DISABLE_ECC);
2814 * The bspec performance guide has recommended MMIO tuning settings. These
2815 * aren't truly "workarounds" but we want to program them with the same
2816 * workaround infrastructure to ensure that they're automatically added to
2817 * the GuC save/restore lists, re-applied at the right times, and checked for
2818 * any conflicting programming requested by real workarounds.
2820 * Programming settings should be added here only if their registers are not
2821 * part of an engine's register state context. If a register is part of a
2822 * context, then any tuning settings should be programmed in an appropriate
2823 * function invoked by __intel_engine_init_ctx_wa().
2826 add_render_compute_tuning_settings(struct intel_gt *gt,
2827 struct i915_wa_list *wal)
2829 struct drm_i915_private *i915 = gt->i915;
2831 if (IS_GFX_GT_IP_RANGE(gt, IP_VER(12, 70), IP_VER(12, 71)) || IS_DG2(i915))
2832 wa_mcr_write_clr_set(wal, RT_CTRL, STACKID_CTRL, STACKID_CTRL_512);
2835 * This tuning setting proves beneficial only on ATS-M designs; the
2836 * default "age based" setting is optimal on regular DG2 and other
2837 * platforms.
2839 if (INTEL_INFO(i915)->tuning_thread_rr_after_dep)
2840 wa_mcr_masked_field_set(wal, GEN9_ROW_CHICKEN4, THREAD_EX_ARB_MODE,
2841 THREAD_EX_ARB_MODE_RR_AFTER_DEP);
2843 if (GRAPHICS_VER(i915) == 12 && GRAPHICS_VER_FULL(i915) < IP_VER(12, 50))
2844 wa_write_clr(wal, GEN8_GARBCNTL, GEN12_BUS_HASH_CTL_BIT_EXC);
2848 * The workarounds in this function apply to shared registers in
2849 * the general render reset domain that aren't tied to a
2850 * specific engine. Since all render+compute engines get reset
2851 * together, and the contents of these registers are lost during
2852 * the shared render domain reset, we'll define such workarounds
2853 * here and then add them to just a single RCS or CCS engine's
2854 * workaround list (whichever engine has the I915_ENGINE_FIRST_RENDER_COMPUTE flag).
2857 general_render_compute_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2859 struct drm_i915_private *i915 = engine->i915;
2860 struct intel_gt *gt = engine->gt;
2862 add_render_compute_tuning_settings(gt, wal);
2864 if (GRAPHICS_VER(i915) >= 11) {
2865 /* This is not a Wa (although referred to as
2866 * WaSetInidrectStateOverride in places); it allows
2867 * applications that reference sampler states through
2868 * the BindlessSamplerStateBaseAddress to have their
2869 * border color relative to DynamicStateBaseAddress
2870 * rather than BindlessSamplerStateBaseAddress.
2872 * Otherwise SAMPLER_STATE border colors have to be
2873 * copied in multiple heaps (DynamicStateBaseAddress &
2874 * BindlessSamplerStateBaseAddress).
2878 wa_mcr_masked_en(wal,
2880 GEN11_INDIRECT_STATE_BASE_ADDR_OVERRIDE);
2883 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_B0, STEP_FOREVER) ||
2884 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_B0, STEP_FOREVER))
2885 /* Wa_14017856879 */
2886 wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN3, MTL_DISABLE_FIX_FOR_EOT_FLUSH);
2888 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2889 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))
2894 wa_mcr_masked_en(wal, GEN10_SAMPLER_MODE,
2895 MTL_DISABLE_SAMPLER_SC_OOO);
2897 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0))
2898 /* Wa_22015279794 */
2899 wa_mcr_masked_en(wal, GEN10_CACHE_MODE_SS,
2900 DISABLE_PREFETCH_INTO_IC);
2902 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2903 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
2905 /* Wa_22013037850 */
2906 wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW,
2907 DISABLE_128B_EVICTION_COMMAND_UDW);
2909 /* Wa_18017747507 */
2910 wa_masked_en(wal, VFG_PREEMPTION_CHICKEN, POLYGON_TRIFAN_LINELOOP_DISABLE);
2913 if (IS_GFX_GT_IP_STEP(gt, IP_VER(12, 70), STEP_A0, STEP_B0) ||
2914 IS_GFX_GT_IP_STEP(gt, IP_VER(12, 71), STEP_A0, STEP_B0) ||
2915 IS_PONTEVECCHIO(i915) ||
2917 /* Wa_22014226127 */
2918 wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0, DISABLE_D8_D16_COASLESCE);
2921 if (IS_PONTEVECCHIO(i915) || IS_DG2(i915)) {
2922 /* Wa_14015227452:dg2,pvc */
2923 wa_mcr_masked_en(wal, GEN9_ROW_CHICKEN4, XEHP_DIS_BBL_SYSPIPE);
2925 /* Wa_16015675438:dg2,pvc */
2926 wa_masked_en(wal, FF_SLICE_CS_CHICKEN2, GEN12_PERF_FIX_BALANCING_CFE_DISABLE);
2931 * Wa_16011620976:dg2_g11
2932 * Wa_22015475538:dg2
2934 wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, DIS_CHAIN_2XSIMD8);
2936 /* Wa_18028616096 */
2937 wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0_UDW, UGM_FRAGMENT_THRESHOLD_TO_3);
2940 if (IS_DG2_G11(i915)) {
2942 * Wa_22012826095:dg2
2943 * Wa_22013059131:dg2
2945 wa_mcr_write_clr_set(wal, LSC_CHICKEN_BIT_0_UDW,
2946 MAXREQS_PER_BANK,
2947 REG_FIELD_PREP(MAXREQS_PER_BANK, 2));
2949 /* Wa_22013059131:dg2 */
2950 wa_mcr_write_or(wal, LSC_CHICKEN_BIT_0,
2951 FORCE_1_SUB_MESSAGE_PER_FRAGMENT);
2956 * Note that register 0xE420 is write-only and cannot be read
2957 * back for verification on DG2 (due to Wa_14012342262), so
2958 * we need to explicitly skip the readback.
2960 wa_mcr_add(wal, GEN10_CACHE_MODE_SS, 0,
2961 _MASKED_BIT_ENABLE(ENABLE_PREFETCH_INTO_IC),
2962 0 /* write-only, so skip validation */,
2966 if (IS_XEHPSDV(i915)) {
2968 wa_mcr_masked_en(wal,
2970 SYSTOLIC_DOP_CLOCK_GATING_DIS);
2973 wa_mcr_masked_en(wal,
2975 GEN12_DISABLE_GRF_CLEAR);
2977 /* Wa_14010449647:xehpsdv */
2978 wa_mcr_masked_en(wal, GEN8_HALF_SLICE_CHICKEN1,
2979 GEN7_PSD_SINGLE_PORT_DISPATCH_ENABLE);
2984 engine_init_workarounds(struct intel_engine_cs *engine, struct i915_wa_list *wal)
2986 if (GRAPHICS_VER(engine->i915) < 4)
2989 engine_fake_wa_init(engine, wal);
2992 * These are common workarounds that just need to be applied
2993 * to a single RCS/CCS engine's workaround list since
2994 * they're reset as part of the general render domain reset.
2996 if (engine->flags & I915_ENGINE_FIRST_RENDER_COMPUTE)
2997 general_render_compute_wa_init(engine, wal);
2999 if (engine->class == COMPUTE_CLASS)
3000 ccs_engine_wa_init(engine, wal);
3001 else if (engine->class == RENDER_CLASS)
3002 rcs_engine_wa_init(engine, wal);
3003 else
3004 xcs_engine_wa_init(engine, wal);
3007 void intel_engine_init_workarounds(struct intel_engine_cs *engine)
3009 struct i915_wa_list *wal = &engine->wa_list;
3011 wa_init_start(wal, engine->gt, "engine", engine->name);
3012 engine_init_workarounds(engine, wal);
3013 wa_init_finish(wal);
3016 void intel_engine_apply_workarounds(struct intel_engine_cs *engine)
3018 wa_list_apply(&engine->wa_list);
3021 static const struct i915_range mcr_ranges_gen8[] = {
3022 { .start = 0x5500, .end = 0x55ff },
3023 { .start = 0x7000, .end = 0x7fff },
3024 { .start = 0x9400, .end = 0x97ff },
3025 { .start = 0xb000, .end = 0xb3ff },
3026 { .start = 0xe000, .end = 0xe7ff },
3030 static const struct i915_range mcr_ranges_gen12[] = {
3031 { .start = 0x8150, .end = 0x815f },
3032 { .start = 0x9520, .end = 0x955f },
3033 { .start = 0xb100, .end = 0xb3ff },
3034 { .start = 0xde80, .end = 0xe8ff },
3035 { .start = 0x24a00, .end = 0x24a7f },
3039 static const struct i915_range mcr_ranges_xehp[] = {
3040 { .start = 0x4000, .end = 0x4aff },
3041 { .start = 0x5200, .end = 0x52ff },
3042 { .start = 0x5400, .end = 0x7fff },
3043 { .start = 0x8140, .end = 0x815f },
3044 { .start = 0x8c80, .end = 0x8dff },
3045 { .start = 0x94d0, .end = 0x955f },
3046 { .start = 0x9680, .end = 0x96ff },
3047 { .start = 0xb000, .end = 0xb3ff },
3048 { .start = 0xc800, .end = 0xcfff },
3049 { .start = 0xd800, .end = 0xd8ff },
3050 { .start = 0xdc00, .end = 0xffff },
3051 { .start = 0x17000, .end = 0x17fff },
3052 { .start = 0x24a00, .end = 0x24a7f },
3056 static bool mcr_range(struct drm_i915_private *i915, u32 offset)
3058 const struct i915_range *mcr_ranges;
3061 if (GRAPHICS_VER_FULL(i915) >= IP_VER(12, 50))
3062 mcr_ranges = mcr_ranges_xehp;
3063 else if (GRAPHICS_VER(i915) >= 12)
3064 mcr_ranges = mcr_ranges_gen12;
3065 else if (GRAPHICS_VER(i915) >= 8)
3066 mcr_ranges = mcr_ranges_gen8;
3071 * Registers in these ranges are affected by the MCR selector
3072 * which only controls CPU initiated MMIO. Routing does not
3073 * work for CS access so we cannot verify them on this path.
3075 for (i = 0; mcr_ranges[i].start; i++)
3076 if (offset >= mcr_ranges[i].start &&
3077 offset <= mcr_ranges[i].end)
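/*
 * Editor's note: illustrative usage, not part of the driver:
 *
 *	mcr_range(i915, 0xe48c);	// true on gen8+: inside 0xe000-0xe7ff, steered
 *	mcr_range(i915, 0x20ec);	// false: CS_DEBUG_MODE1 is in no MCR table
 *
 * Steered offsets are skipped by the SRM verification batch built below.
 */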
3084 wa_list_srm(struct i915_request *rq,
3085 const struct i915_wa_list *wal,
3086 struct i915_vma *vma)
3088 struct drm_i915_private *i915 = rq->i915;
3089 unsigned int i, count = 0;
3090 const struct i915_wa *wa;
3093 srm = MI_STORE_REGISTER_MEM | MI_SRM_LRM_GLOBAL_GTT;
3094 if (GRAPHICS_VER(i915) >= 8)
3095 srm++; /* gen8+ SRM takes an extra address dword */
3097 for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
3098 if (!mcr_range(i915, i915_mmio_reg_offset(wa->reg)))
3099 count++;
3102 cs = intel_ring_begin(rq, 4 * count);
3106 for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
3107 u32 offset = i915_mmio_reg_offset(wa->reg);
3109 if (mcr_range(i915, offset))
3110 continue;
3112 *cs++ = srm;
3113 *cs++ = offset;
3114 *cs++ = i915_ggtt_offset(vma) + sizeof(u32) * i;
3115 *cs++ = 0;
3117 intel_ring_advance(rq, cs);
3122 static int engine_wa_list_verify(struct intel_context *ce,
3123 const struct i915_wa_list * const wal,
3126 const struct i915_wa *wa;
3127 struct i915_request *rq;
3128 struct i915_vma *vma;
3129 struct i915_gem_ww_ctx ww;
3137 vma = __vm_create_scratch_for_read(&ce->engine->gt->ggtt->vm,
3138 wal->count * sizeof(u32));
3140 return PTR_ERR(vma);
3142 intel_engine_pm_get(ce->engine);
3143 i915_gem_ww_ctx_init(&ww, false);
3145 err = i915_gem_object_lock(vma->obj, &ww);
3147 err = intel_context_pin_ww(ce, &ww);
3151 err = i915_vma_pin_ww(vma, &ww, 0, 0,
3152 i915_vma_is_ggtt(vma) ? PIN_GLOBAL : PIN_USER);
3156 rq = i915_request_create(ce);
3162 err = i915_vma_move_to_active(vma, rq, EXEC_OBJECT_WRITE);
3164 err = wa_list_srm(rq, wal, vma);
3166 i915_request_get(rq);
3168 i915_request_set_error_once(rq, err);
3169 i915_request_add(rq);
3174 if (i915_request_wait(rq, 0, HZ / 5) < 0) {
3179 results = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
3180 if (IS_ERR(results)) {
3181 err = PTR_ERR(results);
3186 for (i = 0, wa = wal->list; i < wal->count; i++, wa++) {
3187 if (mcr_range(rq->i915, i915_mmio_reg_offset(wa->reg)))
3190 if (!wa_verify(wal->gt, wa, results[i], wal->name, from))
3194 i915_gem_object_unpin_map(vma->obj);
3197 i915_request_put(rq);
3199 i915_vma_unpin(vma);
3201 intel_context_unpin(ce);
3203 if (err == -EDEADLK) {
3204 err = i915_gem_ww_ctx_backoff(&ww);
3208 i915_gem_ww_ctx_fini(&ww);
3209 intel_engine_pm_put(ce->engine);
3214 int intel_engine_verify_workarounds(struct intel_engine_cs *engine,
3217 return engine_wa_list_verify(engine->kernel_context,
3222 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
3223 #include "selftest_workarounds.c"