/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 *
 */

#include "i915_drv.h"
#include "gvt.h"
#include "trace.h"

/**
 * Defined in Intel Open Source PRM.
 * Ref: https://01.org/linuxgraphics/documentation/hardware-specification-prms
 */
#define TRVATTL3PTRDW(i)	_MMIO(0x4de0 + (i) * 4)
#define TRNULLDETCT		_MMIO(0x4de8)
#define TRINVTILEDETCT		_MMIO(0x4dec)
#define TRVADR			_MMIO(0x4df0)
#define TRTTE			_MMIO(0x4df4)
#define RING_EXCC(base)		_MMIO((base) + 0x28)
#define RING_GFX_MODE(base)	_MMIO((base) + 0x29c)
#define VF_GUARDBAND		_MMIO(0x83a4)

/* Raw offset is appended to each line for convenience. */
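/*
 * Each entry is { ring_id, reg, mask, in_context }. A non-zero mask means
 * the register takes masked writes (the mask is shifted into the upper 16
 * bits on restore); in_context marks registers that live in the logical
 * ring context image and are only written back for inhibit contexts.
 */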
static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
	{RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
	{RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS, HWSTAM, 0x0, false}, /* 0x2098 */
	{RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
	{RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
	{RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
	{RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
	{RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
	{RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
	{RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */

	{BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
	{BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
	{BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
	{BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
	{BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
	{RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
};

static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
	{RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
	{RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS, HWSTAM, 0x0, false}, /* 0x2098 */
	{RCS, INSTPM, 0xffff, true}, /* 0x20c0 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
	{RCS, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
	{RCS, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
	{RCS, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
	{RCS, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
	{RCS, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
	{RCS, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
	{RCS, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */

	{RCS, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */
	{RCS, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */
	{RCS, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */
	{RCS, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
	{RCS, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
	{RCS, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
	{RCS, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
	{RCS, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
	{RCS, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
	{RCS, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
	{RCS, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
	{RCS, TRVATTL3PTRDW(0), 0, false}, /* 0x4de0 */
	{RCS, TRVATTL3PTRDW(1), 0, false}, /* 0x4de4 */
	{RCS, TRNULLDETCT, 0, false}, /* 0x4de8 */
	{RCS, TRINVTILEDETCT, 0, false}, /* 0x4dec */
	{RCS, TRVADR, 0, false}, /* 0x4df0 */
	{RCS, TRTTE, 0, false}, /* 0x4df4 */

	{BCS, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
	{BCS, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
	{BCS, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
	{BCS, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
	{BCS, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */

	{VCS2, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */

	{VECS, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */

	{RCS, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */
	{RCS, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */
	{RCS, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */

	{RCS, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
	{RCS, GEN9_CSFE_CHICKEN1_RCS, 0x0, false}, /* 0x20d4 */

	{RCS, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
	{RCS, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
	{RCS, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
	{RCS, INVALID_MMIO_REG, 0, false } /* Terminated */
};

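/*
 * Cached copy of the host's MOCS and L3CC programming. It is loaded once
 * from the hardware and then stands in for the host side whenever an
 * engine switch involves the host (i.e. pre or next is NULL).
 */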
static struct {
	bool initialized;
	u32 control_table[I915_NUM_ENGINES][64];
	u32 l3cc_table[32];
} gen9_render_mocs;

static void load_render_mocs(struct drm_i915_private *dev_priv)
{
	i915_reg_t offset;
	u32 regs[] = {		/* MOCS control table base, per engine */
		[RCS] = 0xc800,
		[VCS] = 0xc900,
		[VCS2] = 0xca00,
		[BCS] = 0xcc00,
		[VECS] = 0xcb00,
	};
	int ring_id, i;

	for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
		offset.reg = regs[ring_id];
		for (i = 0; i < 64; i++) {
			gen9_render_mocs.control_table[ring_id][i] =
				I915_READ_FW(offset);
			offset.reg += 4;
		}
	}

	offset.reg = 0xb020;	/* L3CC table base */
	for (i = 0; i < 32; i++) {
		gen9_render_mocs.l3cc_table[i] =
			I915_READ_FW(offset);
		offset.reg += 4;
	}

	gen9_render_mocs.initialized = true;
}

static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_vgpu_submission *s = &vgpu->submission;
	enum forcewake_domains fw;
	i915_reg_t reg;
	u32 regs[] = {		/* TLB invalidation request register, per engine */
		[RCS] = 0x4260,
		[VCS] = 0x4264,
		[VCS2] = 0x4268,
		[BCS] = 0x426c,
		[VECS] = 0x4270,
	};

	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
		return;

	if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending))
		return;

	reg = _MMIO(regs[ring_id]);

	/* WaForceWakeRenderDuringMmioTLBInvalidate:skl
	 * We need to hold a forcewake when invalidating RCS TLB caches,
	 * otherwise the device can enter RC6 and interrupt the invalidation
	 * before it completes properly.
	 */
	fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
					    FW_REG_READ | FW_REG_WRITE);
	if (ring_id == RCS && (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)))
		fw |= FORCEWAKE_RENDER;

	intel_uncore_forcewake_get(dev_priv, fw);

	I915_WRITE_FW(reg, 0x1);

	if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
		gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
	else
		vgpu_vreg_t(vgpu, reg) = 0;

	intel_uncore_forcewake_put(dev_priv, fw);

	gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
}

static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
			int ring_id)
{
	struct drm_i915_private *dev_priv;
	i915_reg_t offset, l3_offset;
	u32 old_v, new_v;
	u32 regs[] = {		/* MOCS control table base, per engine */
		[RCS] = 0xc800,
		[VCS] = 0xc900,
		[VCS2] = 0xca00,
		[BCS] = 0xcc00,
		[VECS] = 0xcb00,
	};
	int i;

	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
		return;

	if (!pre && !gen9_render_mocs.initialized)
		load_render_mocs(dev_priv);

	offset.reg = regs[ring_id];
	for (i = 0; i < 64; i++) {
		if (pre)
			old_v = vgpu_vreg_t(pre, offset);
		else
			old_v = gen9_render_mocs.control_table[ring_id][i];
		if (next)
			new_v = vgpu_vreg_t(next, offset);
		else
			new_v = gen9_render_mocs.control_table[ring_id][i];

		if (old_v != new_v)
			I915_WRITE_FW(offset, new_v);

		offset.reg += 4;
	}

	if (ring_id == RCS) {
		l3_offset.reg = 0xb020;
		for (i = 0; i < 32; i++) {
			if (pre)
				old_v = vgpu_vreg_t(pre, l3_offset);
			else
				old_v = gen9_render_mocs.l3cc_table[i];
			if (next)
				new_v = vgpu_vreg_t(next, l3_offset);
			else
				new_v = gen9_render_mocs.l3cc_table[i];

			if (old_v != new_v)
				I915_WRITE_FW(l3_offset, new_v);

			l3_offset.reg += 4;
		}
	}
}

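/* Index of the CTX_CONTEXT_CONTROL value dword in the LRC register state. */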
#define CTX_CONTEXT_CONTROL_VAL	0x03

/* Switch ring mmio values (context). */
static void switch_mmio(struct intel_vgpu *pre,
			struct intel_vgpu *next,
			int ring_id)
{
	struct drm_i915_private *dev_priv;
	struct intel_vgpu_submission *s;
	u32 *reg_state, ctx_ctrl;
	u32 inhibit_mask =
		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
	struct engine_mmio *mmio;
	u32 old_v, new_v;

	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		switch_mocs(pre, next, ring_id);

	for (mmio = dev_priv->gvt->engine_mmio_list;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->ring_id != ring_id)
			continue;
		/* save the outgoing owner's register state */
		if (pre) {
			vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg);
			if (mmio->mask)
				vgpu_vreg_t(pre, mmio->reg) &=
						~(mmio->mask << 16);
			old_v = vgpu_vreg_t(pre, mmio->reg);
		} else
			old_v = mmio->value = I915_READ_FW(mmio->reg);

		/* restore the incoming owner's register state */
		if (next) {
			s = &next->submission;
			reg_state =
				s->shadow_ctx->engine[ring_id].lrc_reg_state;
			ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
			/*
			 * If it is an inhibit context, load in_context mmio
			 * into HW by mmio write. If it is not, skip this mmio
			 * write.
			 */
			if (mmio->in_context &&
			    (ctx_ctrl & inhibit_mask) != inhibit_mask)
				continue;

			if (mmio->mask)
				new_v = vgpu_vreg_t(next, mmio->reg) |
							(mmio->mask << 16);
			else
				new_v = vgpu_vreg_t(next, mmio->reg);
		} else {
			if (mmio->in_context)
				continue;
			if (mmio->mask)
				new_v = mmio->value | (mmio->mask << 16);
			else
				new_v = mmio->value;
		}

		I915_WRITE_FW(mmio->reg, new_v);

		trace_render_mmio(pre ? pre->id : 0,
				  next ? next->id : 0,
				  "switch",
				  i915_mmio_reg_offset(mmio->reg),
				  old_v, new_v);
	}

	if (next)
		handle_tlb_pending_event(next, ring_id);
}

/**
 * intel_gvt_switch_mmio - switch mmio context of a specific engine
 * @pre: the last vGPU that owned the engine
 * @next: the vGPU to switch to
 * @ring_id: specify the engine
 *
 * If pre is null, it indicates that the host owned the engine. If next is
 * null, it indicates that we are switching to the host workload.
 */
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
			   struct intel_vgpu *next, int ring_id)
{
	struct drm_i915_private *dev_priv;

	if (WARN_ON(!pre && !next))
		return;

	gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
		       pre ? "vGPU" : "host", next ? "vGPU" : "host");

	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;

	/*
	 * We are using the raw mmio access wrapper to improve the performance
	 * of batch mmio read/write, so we need to handle forcewake manually.
	 */
	intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
	switch_mmio(pre, next, ring_id);
	intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
}

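/*
 * Illustrative usage (not taken from this file): to hand the render engine
 * from a vGPU to the host, a caller would use
 *	intel_gvt_switch_mmio(vgpu, NULL, RCS);
 * and to give it back to that vGPU later,
 *	intel_gvt_switch_mmio(NULL, vgpu, RCS);
 */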
/**
 * intel_gvt_init_engine_mmio_context - Initialize the engine mmio list
 * @gvt: GVT device
 */
void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
	if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
		gvt->engine_mmio_list = gen9_engine_mmio_list;
	else
		gvt->engine_mmio_list = gen8_engine_mmio_list;
}