/*
 * Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * Authors:
 *    Eddie Dong <eddie.dong@intel.com>
 *    Kevin Tian <kevin.tian@intel.com>
 *
 * Contributors:
 *    Zhi Wang <zhi.a.wang@intel.com>
 *    Changbin Du <changbin.du@intel.com>
 *    Zhenyu Wang <zhenyuw@linux.intel.com>
 *    Tina Zhang <tina.zhang@intel.com>
 *    Bing Niu <bing.niu@intel.com>
 */
#include "i915_drv.h"
#include "gt/intel_context.h"
#include "gvt.h"
#include "trace.h"
#define GEN9_MOCS_SIZE		64

/* Raw offset is appended to each line for convenience. */
static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
	{RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS0, HWSTAM, 0x0, false}, /* 0x2098 */
	{RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
	{RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
	{RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
	{RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
	{RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
	{RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
	{RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */

	{BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
	{BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
	{BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
	{BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
	{BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */
	{RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */
};
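/*
 * The gen9 list below repeats the gen8 entries and adds the gen9-only
 * registers (private PAT, TRTT, additional chicken bits, etc.) that are
 * tracked per engine as well.
 */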
static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
	{RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS0, HWSTAM, 0x0, false}, /* 0x2098 */
	{RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 0), 0, false}, /* 0x24d0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 1), 0, false}, /* 0x24d4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 2), 0, false}, /* 0x24d8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 3), 0, false}, /* 0x24dc */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 4), 0, false}, /* 0x24e0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 5), 0, false}, /* 0x24e4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 6), 0, false}, /* 0x24e8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 7), 0, false}, /* 0x24ec */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 8), 0, false}, /* 0x24f0 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 9), 0, false}, /* 0x24f4 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 10), 0, false}, /* 0x24f8 */
	{RCS0, RING_FORCE_TO_NONPRIV(RENDER_RING_BASE, 11), 0, false}, /* 0x24fc */
	{RCS0, CACHE_MODE_1, 0xffff, true}, /* 0x7004 */
	{RCS0, GEN7_GT_MODE, 0xffff, true}, /* 0x7008 */
	{RCS0, CACHE_MODE_0_GEN7, 0xffff, true}, /* 0x7000 */
	{RCS0, GEN7_COMMON_SLICE_CHICKEN1, 0xffff, true}, /* 0x7010 */
	{RCS0, HDC_CHICKEN0, 0xffff, true}, /* 0x7300 */
	{RCS0, VF_GUARDBAND, 0xffff, true}, /* 0x83a4 */

	{RCS0, GEN8_PRIVATE_PAT_LO, 0, false}, /* 0x40e0 */
	{RCS0, GEN8_PRIVATE_PAT_HI, 0, false}, /* 0x40e4 */
	{RCS0, GEN8_CS_CHICKEN1, 0xffff, true}, /* 0x2580 */
	{RCS0, COMMON_SLICE_CHICKEN2, 0xffff, true}, /* 0x7014 */
	{RCS0, GEN9_CS_DEBUG_MODE1, 0xffff, false}, /* 0x20ec */
	{RCS0, GEN8_L3SQCREG4, 0, false}, /* 0xb118 */
	{RCS0, GEN7_HALF_SLICE_CHICKEN1, 0xffff, true}, /* 0xe100 */
	{RCS0, HALF_SLICE_CHICKEN2, 0xffff, true}, /* 0xe180 */
	{RCS0, HALF_SLICE_CHICKEN3, 0xffff, true}, /* 0xe184 */
	{RCS0, GEN9_HALF_SLICE_CHICKEN5, 0xffff, true}, /* 0xe188 */
	{RCS0, GEN9_HALF_SLICE_CHICKEN7, 0xffff, true}, /* 0xe194 */
	{RCS0, GEN8_ROW_CHICKEN, 0xffff, true}, /* 0xe4f0 */
	{RCS0, TRVATTL3PTRDW(0), 0, true}, /* 0x4de0 */
	{RCS0, TRVATTL3PTRDW(1), 0, true}, /* 0x4de4 */
	{RCS0, TRNULLDETCT, 0, true}, /* 0x4de8 */
	{RCS0, TRINVTILEDETCT, 0, true}, /* 0x4dec */
	{RCS0, TRVADR, 0, true}, /* 0x4df0 */
	{RCS0, TRTTE, 0, true}, /* 0x4df4 */
	{RCS0, _MMIO(0x4dfc), 0, true},

	{BCS0, RING_GFX_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2229c */
	{BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
	{BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
	{BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
	{BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */

	{VCS1, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */

	{VECS0, RING_EXCC(VEBOX_RING_BASE), 0xffff, false}, /* 0x1a028 */

	{RCS0, GEN8_HDC_CHICKEN1, 0xffff, true}, /* 0x7304 */
	{RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */
	{RCS0, GEN7_UCGCTL4, 0x0, false}, /* 0x940c */
	{RCS0, GAMT_CHKN_BIT_REG, 0x0, false}, /* 0x4ab8 */

	{RCS0, GEN9_GAMT_ECO_REG_RW_IA, 0x0, false}, /* 0x4ab0 */
	{RCS0, GEN9_CSFE_CHICKEN1_RCS, 0xffff, false}, /* 0x20d4 */
	{RCS0, _MMIO(0x20D8), 0xffff, true}, /* 0x20d8 */

	{RCS0, GEN8_GARBCNTL, 0x0, false}, /* 0xb004 */
	{RCS0, GEN7_FF_THREAD_MODE, 0x0, false}, /* 0x20a0 */
	{RCS0, FF_SLICE_CS_CHICKEN2, 0xffff, false}, /* 0x20e4 */
	{RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */
};
static struct {
	bool initialized;
	u32 control_table[I915_NUM_ENGINES][GEN9_MOCS_SIZE];
	u32 l3cc_table[GEN9_MOCS_SIZE / 2];
} gen9_render_mocs;
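/*
 * Snapshot the host's MOCS control and l3cc tables so they can be written
 * back when an engine is switched from a vGPU to the host. Raw I915_READ_FW
 * accessors are used, so the caller must hold forcewake
 * (intel_gvt_switch_mmio() takes FORCEWAKE_ALL).
 */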
static void load_render_mocs(struct drm_i915_private *dev_priv)
{
	i915_reg_t offset;
	u32 regs[] = {
		[RCS0]  = 0xc800,
		[VCS0]  = 0xc900,
		[VCS1]  = 0xca00,
		[BCS0]  = 0xcc00,
		[VECS0] = 0xcb00,
	};
	int ring_id, i;

	for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
		if (!HAS_ENGINE(dev_priv, ring_id))
			continue;
		offset.reg = regs[ring_id];
		for (i = 0; i < GEN9_MOCS_SIZE; i++) {
			gen9_render_mocs.control_table[ring_id][i] =
				I915_READ_FW(offset);
			offset.reg += 4;
		}
	}

	offset.reg = 0xb020;
	for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
		gen9_render_mocs.l3cc_table[i] =
			I915_READ_FW(offset);
		offset.reg += 4;
	}

	gen9_render_mocs.initialized = true;
}
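/*
 * Emit an MI_LOAD_REGISTER_IMM into the request's ring that reloads every
 * tracked in-context register of this engine from the vGPU's vregs,
 * bracketed by flushes so the writes land before the workload runs.
 */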
static int
restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu,
				 struct i915_request *req)
{
	u32 *cs;
	int ret;
	struct engine_mmio *mmio;
	struct intel_gvt *gvt = vgpu->gvt;
	int ring_id = req->engine->id;
	int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id];

	if (count == 0)
		return 0;

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	cs = intel_ring_begin(req, count * 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(count);
	for (mmio = gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->ring_id != ring_id ||
		    !mmio->in_context)
			continue;

		*cs++ = i915_mmio_reg_offset(mmio->reg);
		*cs++ = vgpu_vreg_t(vgpu, mmio->reg) |
				(mmio->mask << 16);
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			     *(cs-2), *(cs-1), vgpu->id, ring_id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	ret = req->engine->emit_flush(req, EMIT_BARRIER);
	if (ret)
		return ret;

	return 0;
}
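/*
 * Reload the render MOCS control registers from the vGPU's vregs with a
 * single LRI; only meaningful on the render engine (see the RCS0 check in
 * intel_vgpu_restore_inhibit_context() below).
 */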
static int
restore_render_mocs_control_for_inhibit(struct intel_vgpu *vgpu,
					struct i915_request *req)
{
	unsigned int index;
	u32 *cs;

	cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE);

	for (index = 0; index < GEN9_MOCS_SIZE; index++) {
		*cs++ = i915_mmio_reg_offset(GEN9_GFX_MOCS(index));
		*cs++ = vgpu_vreg_t(vgpu, GEN9_GFX_MOCS(index));
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			     *(cs-2), *(cs-1), vgpu->id, req->engine->id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}
static int
restore_render_mocs_l3cc_for_inhibit(struct intel_vgpu *vgpu,
				     struct i915_request *req)
{
	unsigned int index;
	u32 *cs;

	cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE / 2 + 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE / 2);

	for (index = 0; index < GEN9_MOCS_SIZE / 2; index++) {
		*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(index));
		*cs++ = vgpu_vreg_t(vgpu, GEN9_LNCFCMOCS(index));
		gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, ring_id:%d\n",
			     *(cs-2), *(cs-1), vgpu->id, req->engine->id);
	}

	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return 0;
}
/**
 * intel_vgpu_restore_inhibit_context - restore the mmio of an inhibit context
 * @vgpu: a vGPU
 * @req: the request carrying the restore commands
 *
 * Use the lri command to initialize the mmio which is in the context state
 * image of an inhibit context. It covers the tracked engine mmio, the
 * render_mocs and the render_mocs_l3cc registers.
 */
int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
				       struct i915_request *req)
{
	int ret;
	u32 *cs;

	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	ret = restore_context_mmio_for_inhibit(vgpu, req);
	if (ret)
		goto out;

	/* no MOCS register in context except render engine */
	if (req->engine->id != RCS0)
		goto out;

	ret = restore_render_mocs_control_for_inhibit(vgpu, req);
	if (ret)
		goto out;

	ret = restore_render_mocs_l3cc_for_inhibit(vgpu, req);
	if (ret)
		goto out;

out:
	cs = intel_ring_begin(req, 2);
	if (IS_ERR(cs))
		return PTR_ERR(cs);

	*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
	*cs++ = MI_NOOP;
	intel_ring_advance(req, cs);

	return ret;
}
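/*
 * For reference, with N tracked in-context registers the stream built above
 * looks roughly like (MI_NOOP padding and EMIT_BARRIER flushes omitted):
 *
 *	MI_ARB_ON_OFF | MI_ARB_DISABLE
 *	MI_LOAD_REGISTER_IMM(N)
 *	    <reg offset> <reg value>		(N pairs)
 *	MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE)	(RCS0 only)
 *	    <mocs offset> <mocs value>		(64 pairs)
 *	MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE / 2)	(RCS0 only)
 *	    <l3cc offset> <l3cc value>		(32 pairs)
 *	MI_ARB_ON_OFF | MI_ARB_ENABLE
 */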
static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
	struct intel_uncore *uncore = &dev_priv->uncore;
	struct intel_vgpu_submission *s = &vgpu->submission;
	enum forcewake_domains fw;
	i915_reg_t reg;
	u32 regs[] = {
		[RCS0]  = 0x4260,
		[VCS0]  = 0x4264,
		[VCS1]  = 0x4268,
		[BCS0]  = 0x426c,
		[VECS0] = 0x4270,
	};

	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
		return;

	if (!test_and_clear_bit(ring_id, (void *)s->tlb_handle_pending))
		return;

	reg = _MMIO(regs[ring_id]);

	/* WaForceWakeRenderDuringMmioTLBInvalidate:skl
	 * We need to hold a forcewake while invalidating the RCS TLB caches;
	 * otherwise the device can enter RC6 and interrupt the invalidation
	 * process.
	 */
	fw = intel_uncore_forcewake_for_reg(uncore, reg,
					    FW_REG_READ | FW_REG_WRITE);
	if (ring_id == RCS0 && INTEL_GEN(dev_priv) >= 9)
		fw |= FORCEWAKE_RENDER;

	intel_uncore_forcewake_get(uncore, fw);

	intel_uncore_write_fw(uncore, reg, 0x1);

	if (wait_for_atomic((intel_uncore_read_fw(uncore, reg) == 0), 50))
		gvt_vgpu_err("timeout in invalidating ring %d tlb\n", ring_id);
	else
		vgpu_vreg_t(vgpu, reg) = 0;

	intel_uncore_forcewake_put(uncore, fw);

	gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
}
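/*
 * Program next's MOCS control and l3cc tables into the hardware (or the
 * saved host tables when next is NULL), skipping values that are unchanged
 * from the outgoing owner's view.
 */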
static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
			int ring_id)
{
	struct drm_i915_private *dev_priv;
	i915_reg_t offset, l3_offset;
	u32 old_v, new_v;
	u32 regs[] = {
		[RCS0]  = 0xc800,
		[VCS0]  = 0xc900,
		[VCS1]  = 0xca00,
		[BCS0]  = 0xcc00,
		[VECS0] = 0xcb00,
	};
	int i;

	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
		return;

	if (ring_id == RCS0 && IS_GEN(dev_priv, 9))
		return;

	if (!pre && !gen9_render_mocs.initialized)
		load_render_mocs(dev_priv);

	offset.reg = regs[ring_id];
	for (i = 0; i < GEN9_MOCS_SIZE; i++) {
		if (pre)
			old_v = vgpu_vreg_t(pre, offset);
		else
			old_v = gen9_render_mocs.control_table[ring_id][i];
		if (next)
			new_v = vgpu_vreg_t(next, offset);
		else
			new_v = gen9_render_mocs.control_table[ring_id][i];

		if (old_v != new_v)
			I915_WRITE_FW(offset, new_v);

		offset.reg += 4;
	}

	if (ring_id == RCS0) {
		l3_offset.reg = 0xb020;
		for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
			if (pre)
				old_v = vgpu_vreg_t(pre, l3_offset);
			else
				old_v = gen9_render_mocs.l3cc_table[i];
			if (next)
				new_v = vgpu_vreg_t(next, l3_offset);
			else
				new_v = gen9_render_mocs.l3cc_table[i];

			if (old_v != new_v)
				I915_WRITE_FW(l3_offset, new_v);

			l3_offset.reg += 4;
		}
	}
}
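/*
 * Dword index of the CTX_CONTEXT_CONTROL value within the logical ring
 * context image (lrc_reg_state).
 */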
#define CTX_CONTEXT_CONTROL_VAL	0x03

bool is_inhibit_context(struct intel_context *ce)
{
	const u32 *reg_state = ce->lrc_reg_state;
	u32 inhibit_mask =
		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);

	return inhibit_mask ==
		(reg_state[CTX_CONTEXT_CONTROL_VAL] & inhibit_mask);
}
/* Switch ring mmio values (context). */
static void switch_mmio(struct intel_vgpu *pre,
			struct intel_vgpu *next,
			int ring_id)
{
	struct drm_i915_private *dev_priv;
	struct intel_vgpu_submission *s;
	struct engine_mmio *mmio;
	u32 old_v, new_v;

	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
	if (INTEL_GEN(dev_priv) >= 9)
		switch_mocs(pre, next, ring_id);

	for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->ring_id != ring_id)
			continue;
		/*
		 * No need to save or restore the mmio which is in the context
		 * state image on gen9; it is initialized by the lri command
		 * and saved or restored with the context.
		 */
		if (IS_GEN(dev_priv, 9) && mmio->in_context)
			continue;

		/* save */
		if (pre) {
			vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg);
			if (mmio->mask)
				vgpu_vreg_t(pre, mmio->reg) &=
						~(mmio->mask << 16);
			old_v = vgpu_vreg_t(pre, mmio->reg);
		} else
			old_v = mmio->value = I915_READ_FW(mmio->reg);

		/* restore */
		if (next) {
			s = &next->submission;
			/*
			 * No need to restore the mmio which is in the context
			 * state image if this is not an inhibit context; it
			 * will restore itself.
			 */
			if (mmio->in_context &&
			    !is_inhibit_context(s->shadow[ring_id]))
				continue;

			if (mmio->mask)
				new_v = vgpu_vreg_t(next, mmio->reg) |
							(mmio->mask << 16);
			else
				new_v = vgpu_vreg_t(next, mmio->reg);
		} else {
			if (mmio->in_context)
				continue;
			if (mmio->mask)
				new_v = mmio->value | (mmio->mask << 16);
			else
				new_v = mmio->value;
		}

		I915_WRITE_FW(mmio->reg, new_v);

		trace_render_mmio(pre ? pre->id : 0,
				  next ? next->id : 0,
				  "switch",
				  i915_mmio_reg_offset(mmio->reg),
				  old_v, new_v);
	}

	if (next)
		handle_tlb_pending_event(next, ring_id);
}
/**
 * intel_gvt_switch_mmio - switch the mmio context of a specific engine
 * @pre: the last vGPU that owned the engine
 * @next: the vGPU to switch to
 * @ring_id: specify the engine
 *
 * If pre is NULL, the host owned the engine. If next is NULL,
 * we are switching to a host workload.
 */
void intel_gvt_switch_mmio(struct intel_vgpu *pre,
			   struct intel_vgpu *next, int ring_id)
{
	struct drm_i915_private *dev_priv;

	if (WARN_ON(!pre && !next))
		return;

	gvt_dbg_render("switch ring %d from %s to %s\n", ring_id,
		       pre ? "vGPU" : "host", next ? "vGPU" : "host");

	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;

	/*
	 * We are using the raw mmio access wrappers to improve the
	 * performance of batch mmio read/write, so we need to handle
	 * forcewake manually.
	 */
	intel_uncore_forcewake_get(&dev_priv->uncore, FORCEWAKE_ALL);
	switch_mmio(pre, next, ring_id);
	intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL);
}
/**
 * intel_gvt_init_engine_mmio_context - initialize the engine mmio list
 * @gvt: GVT device
 */
void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
	struct engine_mmio *mmio;

	if (INTEL_GEN(gvt->dev_priv) >= 9)
		gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
	else
		gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;

	for (mmio = gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->in_context) {
			gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
			intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg);
		}
	}
}