*/
#define LEGACY_REQUEST_SIZE 200
-int __intel_ring_space(int head, int tail, int size)
+static unsigned int __intel_ring_space(unsigned int head,
+ unsigned int tail,
+ unsigned int size)
{
- int space = head - tail;
- if (space <= 0)
- space += size;
- return space - I915_RING_FREE_SPACE;
+ /*
+ * "If the Ring Buffer Head Pointer and the Tail Pointer are on the
+ * same cacheline, the Head Pointer must not be greater than the Tail
+ * Pointer."
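+ *
+ * For example (illustrative numbers only): with a 4096 byte ring,
+ * head == 64 and tail == 192, this reports
+ * (64 - 192 - 64) & 4095 == 3904 bytes free, i.e. the wrap-around
+ * gap between tail and head minus one cacheline of guard space to
+ * honour the restriction quoted above.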
+ */
+ GEM_BUG_ON(!is_power_of_2(size));
+ return (head - tail - CACHELINE_BYTES) & (size - 1);
}
-void intel_ring_update_space(struct intel_ring *ring)
+unsigned int intel_ring_update_space(struct intel_ring *ring)
{
- if (ring->last_retired_head != -1) {
- ring->head = ring->last_retired_head;
- ring->last_retired_head = -1;
- }
+ unsigned int space;
- ring->space = __intel_ring_space(ring->head & HEAD_ADDR,
- ring->tail, ring->size);
+ space = __intel_ring_space(ring->head, ring->emit, ring->size);
+
+ ring->space = space;
+ return space;
}
static int
gen2_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- struct intel_ring *ring = req->ring;
- u32 cmd;
- int ret;
+ u32 cmd, *cs;
cmd = MI_FLUSH;
if (mode & EMIT_INVALIDATE)
cmd |= MI_READ_FLUSH;
- ret = intel_ring_begin(req, 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = cmd;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
static int
gen4_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- struct intel_ring *ring = req->ring;
- u32 cmd;
- int ret;
+ u32 cmd, *cs;
/*
* read/write caches:
cmd |= MI_INVALIDATE_ISP;
}
- ret = intel_ring_begin(req, 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = cmd;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
static int
intel_emit_post_sync_nonzero_flush(struct drm_i915_gem_request *req)
{
- struct intel_ring *ring = req->ring;
u32 scratch_addr =
i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
- int ret;
-
- ret = intel_ring_begin(req, 6);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
- intel_ring_emit(ring, PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_STALL_AT_SCOREBOARD);
- intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, 0); /* low dword */
- intel_ring_emit(ring, 0); /* high dword */
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
-
- ret = intel_ring_begin(req, 6);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(5));
- intel_ring_emit(ring, PIPE_CONTROL_QW_WRITE);
- intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ u32 *cs;
+
+ cs = intel_ring_begin(req, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(5);
+ *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+ *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0; /* low dword */
+ *cs++ = 0; /* high dword */
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
+
+ cs = intel_ring_begin(req, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(5);
+ *cs++ = PIPE_CONTROL_QW_WRITE;
+ *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0;
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
static int
gen6_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- struct intel_ring *ring = req->ring;
u32 scratch_addr =
i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
- u32 flags = 0;
+ u32 *cs, flags = 0;
int ret;
/* Force SNB workarounds for PIPE_CONTROL flushes */
flags |= PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_CS_STALL;
}
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
- intel_ring_emit(ring, flags);
- intel_ring_emit(ring, scratch_addr | PIPE_CONTROL_GLOBAL_GTT);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = flags;
+ *cs++ = scratch_addr | PIPE_CONTROL_GLOBAL_GTT;
+ *cs++ = 0;
+ intel_ring_advance(req, cs);
return 0;
}
static int
gen7_render_ring_cs_stall_wa(struct drm_i915_gem_request *req)
{
- struct intel_ring *ring = req->ring;
- int ret;
+ u32 *cs;
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
- intel_ring_emit(ring,
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_STALL_AT_SCOREBOARD);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = PIPE_CONTROL_CS_STALL | PIPE_CONTROL_STALL_AT_SCOREBOARD;
+ *cs++ = 0;
+ *cs++ = 0;
+ intel_ring_advance(req, cs);
return 0;
}
static int
gen7_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- struct intel_ring *ring = req->ring;
u32 scratch_addr =
i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
- u32 flags = 0;
- int ret;
+ u32 *cs, flags = 0;
/*
* Ensure that any following seqno writes only happen when the render
gen7_render_ring_cs_stall_wa(req);
}
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
-
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(4));
- intel_ring_emit(ring, flags);
- intel_ring_emit(ring, scratch_addr);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
-
- return 0;
-}
-
-static int
-gen8_emit_pipe_control(struct drm_i915_gem_request *req,
- u32 flags, u32 scratch_addr)
-{
- struct intel_ring *ring = req->ring;
- int ret;
-
- ret = intel_ring_begin(req, 6);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, GFX_OP_PIPE_CONTROL(6));
- intel_ring_emit(ring, flags);
- intel_ring_emit(ring, scratch_addr);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, 0);
- intel_ring_advance(ring);
+ *cs++ = GFX_OP_PIPE_CONTROL(4);
+ *cs++ = flags;
+ *cs++ = scratch_addr;
+ *cs++ = 0;
+ intel_ring_advance(req, cs);
return 0;
}
static int
gen8_render_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- u32 scratch_addr =
- i915_ggtt_offset(req->engine->scratch) + 2 * CACHELINE_BYTES;
- u32 flags = 0;
- int ret;
+ u32 flags;
+ u32 *cs;
- flags |= PIPE_CONTROL_CS_STALL;
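+ /*
+ * Two 6-dword PIPE_CONTROLs are emitted on the invalidate path: one
+ * for the WaCsStallBeforeStateCacheInvalidate stall below and one for
+ * the flush/invalidate itself; hence 12 dwords instead of 6.
+ */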
+ cs = intel_ring_begin(req, mode & EMIT_INVALIDATE ? 12 : 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
+
+ flags = PIPE_CONTROL_CS_STALL;
if (mode & EMIT_FLUSH) {
flags |= PIPE_CONTROL_RENDER_TARGET_CACHE_FLUSH;
flags |= PIPE_CONTROL_GLOBAL_GTT_IVB;
/* WaCsStallBeforeStateCacheInvalidate:bdw,chv */
- ret = gen8_emit_pipe_control(req,
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_STALL_AT_SCOREBOARD,
- 0);
- if (ret)
- return ret;
+ cs = gen8_emit_pipe_control(cs,
+ PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_STALL_AT_SCOREBOARD,
+ 0);
}
- return gen8_emit_pipe_control(req, flags, scratch_addr);
+ cs = gen8_emit_pipe_control(cs, flags,
+ i915_ggtt_offset(req->engine->scratch) +
+ 2 * CACHELINE_BYTES);
+
+ intel_ring_advance(req, cs);
+
+ return 0;
}
static void ring_setup_phys_status_page(struct intel_engine_cs *engine)
I915_WRITE_CTL(engine, RING_CTL_SIZE(ring->size) | RING_VALID);
/* If the head is still not zero, the ring is dead */
- if (intel_wait_for_register_fw(dev_priv, RING_CTL(engine->mmio_base),
- RING_VALID, RING_VALID,
- 50)) {
+ if (intel_wait_for_register(dev_priv, RING_CTL(engine->mmio_base),
+ RING_VALID, RING_VALID,
+ 50)) {
DRM_ERROR("%s initialization failed "
"ctl %08x (valid? %d) head %08x [%08x] tail %08x [%08x] start %08x [expected %08x]\n",
engine->name,
}
/* If the rq hung, jump to its breadcrumb and skip the batch */
- if (request->fence.error == -EIO) {
- struct intel_ring *ring = request->ring;
-
- ring->head = request->postfix;
- ring->last_retired_head = -1;
- }
+ if (request->fence.error == -EIO)
+ request->ring->head = request->postfix;
} else {
engine->legacy_active_context = NULL;
}
}
-static int intel_ring_workarounds_emit(struct drm_i915_gem_request *req)
-{
- struct intel_ring *ring = req->ring;
- struct i915_workarounds *w = &req->i915->workarounds;
- int ret, i;
-
- if (w->count == 0)
- return 0;
-
- ret = req->engine->emit_flush(req, EMIT_BARRIER);
- if (ret)
- return ret;
-
- ret = intel_ring_begin(req, (w->count * 2 + 2));
- if (ret)
- return ret;
-
- intel_ring_emit(ring, MI_LOAD_REGISTER_IMM(w->count));
- for (i = 0; i < w->count; i++) {
- intel_ring_emit_reg(ring, w->reg[i].addr);
- intel_ring_emit(ring, w->reg[i].value);
- }
- intel_ring_emit(ring, MI_NOOP);
-
- intel_ring_advance(ring);
-
- ret = req->engine->emit_flush(req, EMIT_BARRIER);
- if (ret)
- return ret;
-
- DRM_DEBUG_DRIVER("Number of Workarounds emitted: %d\n", w->count);
-
- return 0;
-}
-
static int intel_rcs_ctx_init(struct drm_i915_gem_request *req)
{
int ret;
return 0;
}
-static int wa_add(struct drm_i915_private *dev_priv,
- i915_reg_t addr,
- const u32 mask, const u32 val)
-{
- const u32 idx = dev_priv->workarounds.count;
-
- if (WARN_ON(idx >= I915_MAX_WA_REGS))
- return -ENOSPC;
-
- dev_priv->workarounds.reg[idx].addr = addr;
- dev_priv->workarounds.reg[idx].value = val;
- dev_priv->workarounds.reg[idx].mask = mask;
-
- dev_priv->workarounds.count++;
-
- return 0;
-}
-
-#define WA_REG(addr, mask, val) do { \
- const int r = wa_add(dev_priv, (addr), (mask), (val)); \
- if (r) \
- return r; \
- } while (0)
-
-#define WA_SET_BIT_MASKED(addr, mask) \
- WA_REG(addr, (mask), _MASKED_BIT_ENABLE(mask))
-
-#define WA_CLR_BIT_MASKED(addr, mask) \
- WA_REG(addr, (mask), _MASKED_BIT_DISABLE(mask))
-
-#define WA_SET_FIELD_MASKED(addr, mask, value) \
- WA_REG(addr, mask, _MASKED_FIELD(mask, value))
-
-#define WA_SET_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) | (mask))
-#define WA_CLR_BIT(addr, mask) WA_REG(addr, mask, I915_READ(addr) & ~(mask))
-
-#define WA_WRITE(addr, val) WA_REG(addr, 0xffffffff, val)
-
-static int wa_ring_whitelist_reg(struct intel_engine_cs *engine,
- i915_reg_t reg)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- struct i915_workarounds *wa = &dev_priv->workarounds;
- const uint32_t index = wa->hw_whitelist_count[engine->id];
-
- if (WARN_ON(index >= RING_MAX_NONPRIV_SLOTS))
- return -EINVAL;
-
- WA_WRITE(RING_FORCE_TO_NONPRIV(engine->mmio_base, index),
- i915_mmio_reg_offset(reg));
- wa->hw_whitelist_count[engine->id]++;
-
- return 0;
-}
-
-static int gen8_init_workarounds(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- WA_SET_BIT_MASKED(INSTPM, INSTPM_FORCE_ORDERING);
-
- /* WaDisableAsyncFlipPerfMode:bdw,chv */
- WA_SET_BIT_MASKED(MI_MODE, ASYNC_FLIP_PERF_DISABLE);
-
- /* WaDisablePartialInstShootdown:bdw,chv */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
- PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
-
- /* Use Force Non-Coherent whenever executing a 3D context. This is a
- * workaround for for a possible hang in the unlikely event a TLB
- * invalidation occurs during a PSD flush.
- */
- /* WaForceEnableNonCoherent:bdw,chv */
- /* WaHdcDisableFetchWhenMasked:bdw,chv */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_DONOT_FETCH_MEM_WHEN_MASKED |
- HDC_FORCE_NON_COHERENT);
-
- /* From the Haswell PRM, Command Reference: Registers, CACHE_MODE_0:
- * "The Hierarchical Z RAW Stall Optimization allows non-overlapping
- * polygons in the same 8x4 pixel/sample area to be processed without
- * stalling waiting for the earlier ones to write to Hierarchical Z
- * buffer."
- *
- * This optimization is off by default for BDW and CHV; turn it on.
- */
- WA_CLR_BIT_MASKED(CACHE_MODE_0_GEN7, HIZ_RAW_STALL_OPT_DISABLE);
-
- /* Wa4x4STCOptimizationDisable:bdw,chv */
- WA_SET_BIT_MASKED(CACHE_MODE_1, GEN8_4x4_STC_OPTIMIZATION_DISABLE);
-
- /*
- * BSpec recommends 8x4 when MSAA is used,
- * however in practice 16x4 seems fastest.
- *
- * Note that PS/WM thread counts depend on the WIZ hashing
- * disable bit, which we don't touch here, but it's good
- * to keep in mind (see 3DSTATE_PS and 3DSTATE_WM).
- */
- WA_SET_FIELD_MASKED(GEN7_GT_MODE,
- GEN6_WIZ_HASHING_MASK,
- GEN6_WIZ_HASHING_16x4);
-
- return 0;
-}
-
-static int bdw_init_workarounds(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- int ret;
-
- ret = gen8_init_workarounds(engine);
- if (ret)
- return ret;
-
- /* WaDisableThreadStallDopClockGating:bdw (pre-production) */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
-
- /* WaDisableDopClockGating:bdw */
- WA_SET_BIT_MASKED(GEN7_ROW_CHICKEN2,
- DOP_CLOCK_GATING_DISABLE);
-
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
- GEN8_SAMPLER_POWER_BYPASS_DIS);
-
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- /* WaForceContextSaveRestoreNonCoherent:bdw */
- HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
- /* WaDisableFenceDestinationToSLM:bdw (pre-prod) */
- (IS_BDW_GT3(dev_priv) ? HDC_FENCE_DEST_SLM_DISABLE : 0));
-
- return 0;
-}
-
-static int chv_init_workarounds(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- int ret;
-
- ret = gen8_init_workarounds(engine);
- if (ret)
- return ret;
-
- /* WaDisableThreadStallDopClockGating:chv */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN, STALL_DOP_GATING_DISABLE);
-
- /* Improve HiZ throughput on CHV. */
- WA_SET_BIT_MASKED(HIZ_CHICKEN, CHV_HZ_8X8_MODE_IN_1X);
-
- return 0;
-}
-
-static int gen9_init_workarounds(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- int ret;
-
- /* WaConextSwitchWithConcurrentTLBInvalidate:skl,bxt,kbl */
- I915_WRITE(GEN9_CSFE_CHICKEN1_RCS, _MASKED_BIT_ENABLE(GEN9_PREEMPT_GPGPU_SYNC_SWITCH_DISABLE));
-
- /* WaEnableLbsSlaRetryTimerDecrement:skl,bxt,kbl */
- I915_WRITE(BDW_SCRATCH1, I915_READ(BDW_SCRATCH1) |
- GEN9_LBS_SLA_RETRY_TIMER_DECREMENT_ENABLE);
-
- /* WaDisableKillLogic:bxt,skl,kbl */
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
- ECOCHK_DIS_TLB);
-
- /* WaClearFlowControlGpgpuContextSave:skl,bxt,kbl */
- /* WaDisablePartialInstShootdown:skl,bxt,kbl */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
- FLOW_CONTROL_ENABLE |
- PARTIAL_INSTRUCTION_SHOOTDOWN_DISABLE);
-
- /* Syncing dependencies between camera and graphics:skl,bxt,kbl */
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
- GEN9_DISABLE_OCL_OOB_SUPPRESS_LOGIC);
-
- /* WaDisableDgMirrorFixInHalfSliceChicken5:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
- WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
- GEN9_DG_MIRROR_FIX_ENABLE);
-
- /* WaSetDisablePixMaskCammingAndRhwoInCommonSliceChicken:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
- WA_SET_BIT_MASKED(GEN7_COMMON_SLICE_CHICKEN1,
- GEN9_RHWO_OPTIMIZATION_DISABLE);
- /*
- * WA also requires GEN9_SLICE_COMMON_ECO_CHICKEN0[14:14] to be set
- * but we do that in per ctx batchbuffer as there is an issue
- * with this register not getting restored on ctx restore
- */
- }
-
- /* WaEnableSamplerGPGPUPreemptionSupport:skl,bxt,kbl */
- WA_SET_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN7,
- GEN9_ENABLE_GPGPU_PREEMPTION);
-
- /* Wa4x4STCOptimizationDisable:skl,bxt,kbl */
- /* WaDisablePartialResolveInVc:skl,bxt,kbl */
- WA_SET_BIT_MASKED(CACHE_MODE_1, (GEN8_4x4_STC_OPTIMIZATION_DISABLE |
- GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE));
-
- /* WaCcsTlbPrefetchDisable:skl,bxt,kbl */
- WA_CLR_BIT_MASKED(GEN9_HALF_SLICE_CHICKEN5,
- GEN9_CCS_TLB_PREFETCH_ENABLE);
-
- /* WaDisableMaskBasedCammingInRCC:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
- WA_SET_BIT_MASKED(SLICE_ECO_CHICKEN0,
- PIXEL_MASK_CAMMING_DISABLE);
-
- /* WaForceContextSaveRestoreNonCoherent:skl,bxt,kbl */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FORCE_CONTEXT_SAVE_RESTORE_NON_COHERENT |
- HDC_FORCE_CSR_NON_COHERENT_OVR_DISABLE);
-
- /* WaForceEnableNonCoherent and WaDisableHDCInvalidation are
- * both tied to WaForceContextSaveRestoreNonCoherent
- * in some hsds for skl. We keep the tie for all gen9. The
- * documentation is a bit hazy and so we want to get common behaviour,
- * even though there is no clear evidence we would need both on kbl/bxt.
- * This area has been source of system hangs so we play it safe
- * and mimic the skl regardless of what bspec says.
- *
- * Use Force Non-Coherent whenever executing a 3D context. This
- * is a workaround for a possible hang in the unlikely event
- * a TLB invalidation occurs during a PSD flush.
- */
-
- /* WaForceEnableNonCoherent:skl,bxt,kbl */
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FORCE_NON_COHERENT);
-
- /* WaDisableHDCInvalidation:skl,bxt,kbl */
- I915_WRITE(GAM_ECOCHK, I915_READ(GAM_ECOCHK) |
- BDW_DISABLE_HDC_INVALIDATION);
-
- /* WaDisableSamplerPowerBypassForSOPingPong:skl,bxt,kbl */
- if (IS_SKYLAKE(dev_priv) ||
- IS_KABYLAKE(dev_priv) ||
- IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0))
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN3,
- GEN8_SAMPLER_POWER_BYPASS_DIS);
-
- /* WaDisableSTUnitPowerOptimization:skl,bxt,kbl */
- WA_SET_BIT_MASKED(HALF_SLICE_CHICKEN2, GEN8_ST_PO_DISABLE);
-
- /* WaOCLCoherentLineFlush:skl,bxt,kbl */
- I915_WRITE(GEN8_L3SQCREG4, (I915_READ(GEN8_L3SQCREG4) |
- GEN8_LQSC_FLUSH_COHERENT_LINES));
-
- /* WaVFEStateAfterPipeControlwithMediaStateClear:skl,bxt */
- ret = wa_ring_whitelist_reg(engine, GEN9_CTX_PREEMPT_REG);
- if (ret)
- return ret;
-
- /* WaEnablePreemptionGranularityControlByUMD:skl,bxt,kbl */
- ret= wa_ring_whitelist_reg(engine, GEN8_CS_CHICKEN1);
- if (ret)
- return ret;
-
- /* WaAllowUMDToModifyHDCChicken1:skl,bxt,kbl */
- ret = wa_ring_whitelist_reg(engine, GEN8_HDC_CHICKEN1);
- if (ret)
- return ret;
-
- return 0;
-}
-
-static int skl_tune_iz_hashing(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- u8 vals[3] = { 0, 0, 0 };
- unsigned int i;
-
- for (i = 0; i < 3; i++) {
- u8 ss;
-
- /*
- * Only consider slices where one, and only one, subslice has 7
- * EUs
- */
- if (!is_power_of_2(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]))
- continue;
-
- /*
- * subslice_7eu[i] != 0 (because of the check above) and
- * ss_max == 4 (maximum number of subslices possible per slice)
- *
- * -> 0 <= ss <= 3;
- */
- ss = ffs(INTEL_INFO(dev_priv)->sseu.subslice_7eu[i]) - 1;
- vals[i] = 3 - ss;
- }
-
- if (vals[0] == 0 && vals[1] == 0 && vals[2] == 0)
- return 0;
-
- /* Tune IZ hashing. See intel_device_info_runtime_init() */
- WA_SET_FIELD_MASKED(GEN7_GT_MODE,
- GEN9_IZ_HASHING_MASK(2) |
- GEN9_IZ_HASHING_MASK(1) |
- GEN9_IZ_HASHING_MASK(0),
- GEN9_IZ_HASHING(2, vals[2]) |
- GEN9_IZ_HASHING(1, vals[1]) |
- GEN9_IZ_HASHING(0, vals[0]));
-
- return 0;
-}
-
-static int skl_init_workarounds(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- int ret;
-
- ret = gen9_init_workarounds(engine);
- if (ret)
- return ret;
-
- /*
- * Actual WA is to disable percontext preemption granularity control
- * until D0 which is the default case so this is equivalent to
- * !WaDisablePerCtxtPreemptionGranularityControl:skl
- */
- I915_WRITE(GEN7_FF_SLICE_CS_CHICKEN1,
- _MASKED_BIT_ENABLE(GEN9_FFSC_PERCTX_PREEMPT_CTRL));
-
- /* WaEnableGapsTsvCreditFix:skl */
- I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
- GEN9_GAPS_TSV_CREDIT_DISABLE));
-
- /* WaDisableGafsUnitClkGating:skl */
- WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
-
- /* WaInPlaceDecompressionHang:skl */
- if (IS_SKL_REVID(dev_priv, SKL_REVID_H0, REVID_FOREVER))
- WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
- /* WaDisableLSQCROPERFforOCL:skl */
- ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
- if (ret)
- return ret;
-
- return skl_tune_iz_hashing(engine);
-}
-
-static int bxt_init_workarounds(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- int ret;
-
- ret = gen9_init_workarounds(engine);
- if (ret)
- return ret;
-
- /* WaStoreMultiplePTEenable:bxt */
- /* This is a requirement according to Hardware specification */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1))
- I915_WRITE(TILECTL, I915_READ(TILECTL) | TILECTL_TLBPF);
-
- /* WaSetClckGatingDisableMedia:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
- I915_WRITE(GEN7_MISCCPCTL, (I915_READ(GEN7_MISCCPCTL) &
- ~GEN8_DOP_CLOCK_GATE_MEDIA_ENABLE));
- }
-
- /* WaDisableThreadStallDopClockGating:bxt */
- WA_SET_BIT_MASKED(GEN8_ROW_CHICKEN,
- STALL_DOP_GATING_DISABLE);
-
- /* WaDisablePooledEuLoadBalancingFix:bxt */
- if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER)) {
- WA_SET_BIT_MASKED(FF_SLICE_CS_CHICKEN2,
- GEN9_POOLED_EU_LOAD_BALANCING_FIX_DISABLE);
- }
-
- /* WaDisableSbeCacheDispatchPortSharing:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_B0)) {
- WA_SET_BIT_MASKED(
- GEN7_HALF_SLICE_CHICKEN1,
- GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
- }
-
- /* WaDisableObjectLevelPreemptionForTrifanOrPolygon:bxt */
- /* WaDisableObjectLevelPreemptionForInstancedDraw:bxt */
- /* WaDisableObjectLevelPreemtionForInstanceId:bxt */
- /* WaDisableLSQCROPERFforOCL:bxt */
- if (IS_BXT_REVID(dev_priv, 0, BXT_REVID_A1)) {
- ret = wa_ring_whitelist_reg(engine, GEN9_CS_DEBUG_MODE1);
- if (ret)
- return ret;
-
- ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
- if (ret)
- return ret;
- }
-
- /* WaProgramL3SqcReg1DefaultForPerf:bxt */
- if (IS_BXT_REVID(dev_priv, BXT_REVID_B0, REVID_FOREVER))
- I915_WRITE(GEN8_L3SQCREG1, L3_GENERAL_PRIO_CREDITS(62) |
- L3_HIGH_PRIO_CREDITS(2));
-
- /* WaToEnableHwFixForPushConstHWBug:bxt */
- if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
- /* WaInPlaceDecompressionHang:bxt */
- if (IS_BXT_REVID(dev_priv, BXT_REVID_C0, REVID_FOREVER))
- WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
- return 0;
-}
-
-static int kbl_init_workarounds(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
- int ret;
-
- ret = gen9_init_workarounds(engine);
- if (ret)
- return ret;
-
- /* WaEnableGapsTsvCreditFix:kbl */
- I915_WRITE(GEN8_GARBCNTL, (I915_READ(GEN8_GARBCNTL) |
- GEN9_GAPS_TSV_CREDIT_DISABLE));
-
- /* WaDisableDynamicCreditSharing:kbl */
- if (IS_KBL_REVID(dev_priv, 0, KBL_REVID_B0))
- WA_SET_BIT(GAMT_CHKN_BIT_REG,
- GAMT_CHKN_DISABLE_DYNAMIC_CREDIT_SHARING);
-
- /* WaDisableFenceDestinationToSLM:kbl (pre-prod) */
- if (IS_KBL_REVID(dev_priv, KBL_REVID_A0, KBL_REVID_A0))
- WA_SET_BIT_MASKED(HDC_CHICKEN0,
- HDC_FENCE_DEST_SLM_DISABLE);
-
- /* WaToEnableHwFixForPushConstHWBug:kbl */
- if (IS_KBL_REVID(dev_priv, KBL_REVID_C0, REVID_FOREVER))
- WA_SET_BIT_MASKED(COMMON_SLICE_CHICKEN2,
- GEN8_SBE_DISABLE_REPLAY_BUF_OPTIMIZATION);
-
- /* WaDisableGafsUnitClkGating:kbl */
- WA_SET_BIT(GEN7_UCGCTL4, GEN8_EU_GAUNIT_CLOCK_GATE_DISABLE);
-
- /* WaDisableSbeCacheDispatchPortSharing:kbl */
- WA_SET_BIT_MASKED(
- GEN7_HALF_SLICE_CHICKEN1,
- GEN7_SBE_SS_CACHE_DISPATCH_PORT_SHARING_DISABLE);
-
- /* WaInPlaceDecompressionHang:kbl */
- WA_SET_BIT(GEN9_GAMT_ECO_REG_RW_IA,
- GAMT_ECO_ENABLE_IN_PLACE_DECOMPRESS);
-
- /* WaDisableLSQCROPERFforOCL:kbl */
- ret = wa_ring_whitelist_reg(engine, GEN8_L3SQCREG4);
- if (ret)
- return ret;
-
- return 0;
-}
-
-int init_workarounds_ring(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- WARN_ON(engine->id != RCS);
-
- dev_priv->workarounds.count = 0;
- dev_priv->workarounds.hw_whitelist_count[RCS] = 0;
-
- if (IS_BROADWELL(dev_priv))
- return bdw_init_workarounds(engine);
-
- if (IS_CHERRYVIEW(dev_priv))
- return chv_init_workarounds(engine);
-
- if (IS_SKYLAKE(dev_priv))
- return skl_init_workarounds(engine);
-
- if (IS_BROXTON(dev_priv))
- return bxt_init_workarounds(engine);
-
- if (IS_KABYLAKE(dev_priv))
- return kbl_init_workarounds(engine);
-
- return 0;
-}
-
static int init_render_ring(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;
i915_vma_unpin_and_release(&dev_priv->semaphore);
}
-static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *out)
+static u32 *gen8_rcs_signal(struct drm_i915_gem_request *req, u32 *cs)
{
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *waiter;
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
- *out++ = GFX_OP_PIPE_CONTROL(6);
- *out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_QW_WRITE |
- PIPE_CONTROL_CS_STALL);
- *out++ = lower_32_bits(gtt_offset);
- *out++ = upper_32_bits(gtt_offset);
- *out++ = req->global_seqno;
- *out++ = 0;
- *out++ = (MI_SEMAPHORE_SIGNAL |
- MI_SEMAPHORE_TARGET(waiter->hw_id));
- *out++ = 0;
+ *cs++ = GFX_OP_PIPE_CONTROL(6);
+ *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_QW_WRITE |
+ PIPE_CONTROL_CS_STALL;
+ *cs++ = lower_32_bits(gtt_offset);
+ *cs++ = upper_32_bits(gtt_offset);
+ *cs++ = req->global_seqno;
+ *cs++ = 0;
+ *cs++ = MI_SEMAPHORE_SIGNAL |
+ MI_SEMAPHORE_TARGET(waiter->hw_id);
+ *cs++ = 0;
}
- return out;
+ return cs;
}
-static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *out)
+static u32 *gen8_xcs_signal(struct drm_i915_gem_request *req, u32 *cs)
{
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *waiter;
if (gtt_offset == MI_SEMAPHORE_SYNC_INVALID)
continue;
- *out++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
- *out++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
- *out++ = upper_32_bits(gtt_offset);
- *out++ = req->global_seqno;
- *out++ = (MI_SEMAPHORE_SIGNAL |
- MI_SEMAPHORE_TARGET(waiter->hw_id));
- *out++ = 0;
+ *cs++ = (MI_FLUSH_DW + 1) | MI_FLUSH_DW_OP_STOREDW;
+ *cs++ = lower_32_bits(gtt_offset) | MI_FLUSH_DW_USE_GTT;
+ *cs++ = upper_32_bits(gtt_offset);
+ *cs++ = req->global_seqno;
+ *cs++ = MI_SEMAPHORE_SIGNAL |
+ MI_SEMAPHORE_TARGET(waiter->hw_id);
+ *cs++ = 0;
}
- return out;
+ return cs;
}
-static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *out)
+static u32 *gen6_signal(struct drm_i915_gem_request *req, u32 *cs)
{
struct drm_i915_private *dev_priv = req->i915;
struct intel_engine_cs *engine;
mbox_reg = req->engine->semaphore.mbox.signal[engine->hw_id];
if (i915_mmio_reg_valid(mbox_reg)) {
- *out++ = MI_LOAD_REGISTER_IMM(1);
- *out++ = i915_mmio_reg_offset(mbox_reg);
- *out++ = req->global_seqno;
+ *cs++ = MI_LOAD_REGISTER_IMM(1);
+ *cs++ = i915_mmio_reg_offset(mbox_reg);
+ *cs++ = req->global_seqno;
num_rings++;
}
}
if (num_rings & 1)
- *out++ = MI_NOOP;
+ *cs++ = MI_NOOP;
- return out;
+ return cs;
}
static void i9xx_submit_request(struct drm_i915_gem_request *request)
i915_gem_request_submit(request);
- I915_WRITE_TAIL(request->engine, request->tail);
+ I915_WRITE_TAIL(request->engine,
+ intel_ring_set_tail(request->ring, request->tail));
}
-static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req,
- u32 *out)
+static void i9xx_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
{
- *out++ = MI_STORE_DWORD_INDEX;
- *out++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
- *out++ = req->global_seqno;
- *out++ = MI_USER_INTERRUPT;
+ *cs++ = MI_STORE_DWORD_INDEX;
+ *cs++ = I915_GEM_HWS_INDEX << MI_STORE_DWORD_INDEX_SHIFT;
+ *cs++ = req->global_seqno;
+ *cs++ = MI_USER_INTERRUPT;
- req->tail = intel_ring_offset(req->ring, out);
+ req->tail = intel_ring_offset(req, cs);
+ assert_ring_tail_valid(req->ring, req->tail);
}
static const int i9xx_emit_breadcrumb_sz = 4;
* Update the mailbox registers in the *other* rings with the current seqno.
* This acts like a signal in the canonical semaphore.
*/
-static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req,
- u32 *out)
+static void gen6_sema_emit_breadcrumb(struct drm_i915_gem_request *req, u32 *cs)
{
return i9xx_emit_breadcrumb(req,
- req->engine->semaphore.signal(req, out));
+ req->engine->semaphore.signal(req, cs));
}
static void gen8_render_emit_breadcrumb(struct drm_i915_gem_request *req,
- u32 *out)
+ u32 *cs)
{
struct intel_engine_cs *engine = req->engine;
if (engine->semaphore.signal)
- out = engine->semaphore.signal(req, out);
-
- *out++ = GFX_OP_PIPE_CONTROL(6);
- *out++ = (PIPE_CONTROL_GLOBAL_GTT_IVB |
- PIPE_CONTROL_CS_STALL |
- PIPE_CONTROL_QW_WRITE);
- *out++ = intel_hws_seqno_address(engine);
- *out++ = 0;
- *out++ = req->global_seqno;
+ cs = engine->semaphore.signal(req, cs);
+
+ *cs++ = GFX_OP_PIPE_CONTROL(6);
+ *cs++ = PIPE_CONTROL_GLOBAL_GTT_IVB | PIPE_CONTROL_CS_STALL |
+ PIPE_CONTROL_QW_WRITE;
+ *cs++ = intel_hws_seqno_address(engine);
+ *cs++ = 0;
+ *cs++ = req->global_seqno;
/* We're thrashing one dword of HWS. */
- *out++ = 0;
- *out++ = MI_USER_INTERRUPT;
- *out++ = MI_NOOP;
+ *cs++ = 0;
+ *cs++ = MI_USER_INTERRUPT;
+ *cs++ = MI_NOOP;
- req->tail = intel_ring_offset(req->ring, out);
+ req->tail = intel_ring_offset(req, cs);
+ assert_ring_tail_valid(req->ring, req->tail);
}
static const int gen8_render_emit_breadcrumb_sz = 8;
gen8_ring_sync_to(struct drm_i915_gem_request *req,
struct drm_i915_gem_request *signal)
{
- struct intel_ring *ring = req->ring;
struct drm_i915_private *dev_priv = req->i915;
u64 offset = GEN8_WAIT_OFFSET(req->engine, signal->engine->id);
struct i915_hw_ppgtt *ppgtt;
- int ret;
+ u32 *cs;
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring,
- MI_SEMAPHORE_WAIT |
- MI_SEMAPHORE_GLOBAL_GTT |
- MI_SEMAPHORE_SAD_GTE_SDD);
- intel_ring_emit(ring, signal->global_seqno);
- intel_ring_emit(ring, lower_32_bits(offset));
- intel_ring_emit(ring, upper_32_bits(offset));
- intel_ring_advance(ring);
+ *cs++ = MI_SEMAPHORE_WAIT | MI_SEMAPHORE_GLOBAL_GTT |
+ MI_SEMAPHORE_SAD_GTE_SDD;
+ *cs++ = signal->global_seqno;
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ intel_ring_advance(req, cs);
/* When the !RCS engines idle waiting upon a semaphore, they lose their
* pagetables and we must reload them before executing the batch.
gen6_ring_sync_to(struct drm_i915_gem_request *req,
struct drm_i915_gem_request *signal)
{
- struct intel_ring *ring = req->ring;
u32 dw1 = MI_SEMAPHORE_MBOX |
MI_SEMAPHORE_COMPARE |
MI_SEMAPHORE_REGISTER;
u32 wait_mbox = signal->engine->semaphore.mbox.wait[req->engine->hw_id];
- int ret;
+ u32 *cs;
WARN_ON(wait_mbox == MI_SEMAPHORE_SYNC_INVALID);
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, dw1 | wait_mbox);
+ *cs++ = dw1 | wait_mbox;
/* Throughout all of the GEM code, seqno passed implies our current
* seqno is >= the last seqno executed. However for hardware the
* comparison is strictly greater than.
*/
- intel_ring_emit(ring, signal->global_seqno - 1);
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = signal->global_seqno - 1;
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
static int
bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- struct intel_ring *ring = req->ring;
- int ret;
+ u32 *cs;
- ret = intel_ring_begin(req, 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, MI_FLUSH);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = MI_FLUSH;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
u64 offset, u32 length,
unsigned int dispatch_flags)
{
- struct intel_ring *ring = req->ring;
- int ret;
+ u32 *cs;
- ret = intel_ring_begin(req, 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring,
- MI_BATCH_BUFFER_START |
- MI_BATCH_GTT |
- (dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_NON_SECURE_I965));
- intel_ring_emit(ring, offset);
- intel_ring_advance(ring);
+ *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT |
+ (dispatch_flags & I915_DISPATCH_SECURE ?
+ 0 : MI_BATCH_NON_SECURE_I965);
+ *cs++ = offset;
+ intel_ring_advance(req, cs);
return 0;
}
u64 offset, u32 len,
unsigned int dispatch_flags)
{
- struct intel_ring *ring = req->ring;
- u32 cs_offset = i915_ggtt_offset(req->engine->scratch);
- int ret;
+ u32 *cs, cs_offset = i915_ggtt_offset(req->engine->scratch);
- ret = intel_ring_begin(req, 6);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 6);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
/* Evict the invalid PTE TLBs */
- intel_ring_emit(ring, COLOR_BLT_CMD | BLT_WRITE_RGBA);
- intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096);
- intel_ring_emit(ring, I830_TLB_ENTRIES << 16 | 4); /* load each page */
- intel_ring_emit(ring, cs_offset);
- intel_ring_emit(ring, 0xdeadbeef);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = COLOR_BLT_CMD | BLT_WRITE_RGBA;
+ *cs++ = BLT_DEPTH_32 | BLT_ROP_COLOR_COPY | 4096;
+ *cs++ = I830_TLB_ENTRIES << 16 | 4; /* load each page */
+ *cs++ = cs_offset;
+ *cs++ = 0xdeadbeef;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
if ((dispatch_flags & I915_DISPATCH_PINNED) == 0) {
if (len > I830_BATCH_LIMIT)
return -ENOSPC;
- ret = intel_ring_begin(req, 6 + 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 6 + 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
/* Blit the batch (which has now all relocs applied) to the
* stable batch scratch bo area (so that the CS never
* stumbles over its tlb invalidation bug) ...
*/
- intel_ring_emit(ring, SRC_COPY_BLT_CMD | BLT_WRITE_RGBA);
- intel_ring_emit(ring,
- BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096);
- intel_ring_emit(ring, DIV_ROUND_UP(len, 4096) << 16 | 4096);
- intel_ring_emit(ring, cs_offset);
- intel_ring_emit(ring, 4096);
- intel_ring_emit(ring, offset);
-
- intel_ring_emit(ring, MI_FLUSH);
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = SRC_COPY_BLT_CMD | BLT_WRITE_RGBA;
+ *cs++ = BLT_DEPTH_32 | BLT_ROP_SRC_COPY | 4096;
+ *cs++ = DIV_ROUND_UP(len, 4096) << 16 | 4096;
+ *cs++ = cs_offset;
+ *cs++ = 4096;
+ *cs++ = offset;
+
+ *cs++ = MI_FLUSH;
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
/* ... and execute it. */
offset = cs_offset;
}
- ret = intel_ring_begin(req, 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
- intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_NON_SECURE));
- intel_ring_advance(ring);
+ *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+ *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
+ MI_BATCH_NON_SECURE);
+ intel_ring_advance(req, cs);
return 0;
}
u64 offset, u32 len,
unsigned int dispatch_flags)
{
- struct intel_ring *ring = req->ring;
- int ret;
+ u32 *cs;
- ret = intel_ring_begin(req, 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring, MI_BATCH_BUFFER_START | MI_BATCH_GTT);
- intel_ring_emit(ring, offset | (dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_NON_SECURE));
- intel_ring_advance(ring);
+ *cs++ = MI_BATCH_BUFFER_START | MI_BATCH_GTT;
+ *cs++ = offset | (dispatch_flags & I915_DISPATCH_SECURE ? 0 :
+ MI_BATCH_NON_SECURE);
+ intel_ring_advance(req, cs);
return 0;
}
{
struct drm_i915_private *dev_priv = engine->i915;
+ GEM_BUG_ON(engine->id != RCS);
+
dev_priv->status_page_dmah =
drm_pci_alloc(&dev_priv->drm, PAGE_SIZE, PAGE_SIZE);
if (!dev_priv->status_page_dmah)
return 0;
}
-int intel_ring_pin(struct intel_ring *ring, unsigned int offset_bias)
+int intel_ring_pin(struct intel_ring *ring,
+ struct drm_i915_private *i915,
+ unsigned int offset_bias)
{
- unsigned int flags;
- enum i915_map_type map;
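+ /*
+ * A cached (WB) mapping of the ring is coherent with the GPU on LLC
+ * platforms; elsewhere fall back to write-combining so CPU writes
+ * reach memory without explicit flushing.
+ */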
+ enum i915_map_type map = HAS_LLC(i915) ? I915_MAP_WB : I915_MAP_WC;
struct i915_vma *vma = ring->vma;
+ unsigned int flags;
void *addr;
int ret;
GEM_BUG_ON(ring->vaddr);
- map = HAS_LLC(ring->engine->i915) ? I915_MAP_WB : I915_MAP_WC;
flags = PIN_GLOBAL;
if (offset_bias)
return PTR_ERR(addr);
}
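+ /*
+ * Reset an idle ring so that head, tail and the software emit pointer
+ * all point at the same offset and the free space is recomputed.
+ */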
+void intel_ring_reset(struct intel_ring *ring, u32 tail)
+{
+ GEM_BUG_ON(!list_empty(&ring->request_list));
+ ring->tail = tail;
+ ring->head = tail;
+ ring->emit = tail;
+ intel_ring_update_space(ring);
+}
+
void intel_ring_unpin(struct intel_ring *ring)
{
GEM_BUG_ON(!ring->vma);
GEM_BUG_ON(!ring->vaddr);
+ /* Discard any unused bytes beyond that submitted to hw. */
+ intel_ring_reset(ring, ring->tail);
+
if (i915_vma_is_map_and_fenceable(ring->vma))
i915_vma_unpin_iomap(ring->vma);
else
obj = i915_gem_object_create_stolen(dev_priv, size);
if (!obj)
- obj = i915_gem_object_create(dev_priv, size);
+ obj = i915_gem_object_create_internal(dev_priv, size);
if (IS_ERR(obj))
return ERR_CAST(obj);
if (!ring)
return ERR_PTR(-ENOMEM);
- ring->engine = engine;
-
INIT_LIST_HEAD(&ring->request_list);
ring->size = size;
if (IS_I830(engine->i915) || IS_I845G(engine->i915))
ring->effective_size -= 2 * CACHELINE_BYTES;
- ring->last_retired_head = -1;
intel_ring_update_space(ring);
vma = intel_ring_create_vma(engine->i915, size);
kfree(ring);
}
-static int context_pin(struct i915_gem_context *ctx, unsigned int flags)
+static int context_pin(struct i915_gem_context *ctx)
{
struct i915_vma *vma = ctx->engine[RCS].state;
int ret;
return ret;
}
- return i915_vma_pin(vma, 0, ctx->ggtt_alignment, PIN_GLOBAL | flags);
+ return i915_vma_pin(vma, 0, I915_GTT_MIN_ALIGNMENT,
+ PIN_GLOBAL | PIN_HIGH);
+}
+
+static struct i915_vma *
+alloc_context_vma(struct intel_engine_cs *engine)
+{
+ struct drm_i915_private *i915 = engine->i915;
+ struct drm_i915_gem_object *obj;
+ struct i915_vma *vma;
+
+ obj = i915_gem_object_create(i915, engine->context_size);
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ /*
+ * Try to make the context utilize L3 as well as LLC.
+ *
+ * On VLV we don't have L3 controls in the PTEs so we
+ * shouldn't touch the cache level, especially as that
+ * would make the object snooped which might have a
+ * negative performance impact.
+ *
+ * Snooping is required on non-llc platforms in execlist
+ * mode, but since all GGTT accesses use PAT entry 0 we
+ * get snooping anyway regardless of cache_level.
+ *
+ * This is only applicable for Ivy Bridge devices since
+ * later platforms don't have L3 control bits in the PTE.
+ */
+ if (IS_IVYBRIDGE(i915)) {
+ /* Ignore any error, regard it as a simple optimisation */
+ i915_gem_object_set_cache_level(obj, I915_CACHE_L3_LLC);
+ }
+
+ vma = i915_vma_instance(obj, &i915->ggtt.base, NULL);
+ if (IS_ERR(vma))
+ i915_gem_object_put(obj);
+
+ return vma;
}
-static int intel_ring_context_pin(struct intel_engine_cs *engine,
- struct i915_gem_context *ctx)
+static struct intel_ring *
+intel_ring_context_pin(struct intel_engine_cs *engine,
+ struct i915_gem_context *ctx)
{
struct intel_context *ce = &ctx->engine[engine->id];
int ret;
lockdep_assert_held(&ctx->i915->drm.struct_mutex);
- if (ce->pin_count++)
- return 0;
+ if (likely(ce->pin_count++))
+ goto out;
+ GEM_BUG_ON(!ce->pin_count); /* no overflow please! */
- if (ce->state) {
- unsigned int flags;
+ if (!ce->state && engine->context_size) {
+ struct i915_vma *vma;
+
+ vma = alloc_context_vma(engine);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto err;
+ }
- flags = 0;
- if (i915_gem_context_is_kernel(ctx))
- flags = PIN_HIGH;
+ ce->state = vma;
+ }
- ret = context_pin(ctx, flags);
+ if (ce->state) {
+ ret = context_pin(ctx);
if (ret)
- goto error;
+ goto err;
ce->state->obj->mm.dirty = true;
}
ce->initialised = true;
i915_gem_context_get(ctx);
- return 0;
-error:
+out:
+ /* One ringbuffer to rule them all */
+ return engine->buffer;
+
+err:
ce->pin_count = 0;
- return ret;
+ return ERR_PTR(ret);
}
static void intel_ring_context_unpin(struct intel_engine_cs *engine,
static int intel_init_ring_buffer(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv = engine->i915;
struct intel_ring *ring;
- int ret;
-
- WARN_ON(engine->buffer);
+ int err;
intel_engine_setup_common(engine);
- ret = intel_engine_init_common(engine);
- if (ret)
- goto error;
+ err = intel_engine_init_common(engine);
+ if (err)
+ goto err;
+
+ if (HWS_NEEDS_PHYSICAL(engine->i915))
+ err = init_phys_status_page(engine);
+ else
+ err = init_status_page(engine);
+ if (err)
+ goto err;
ring = intel_engine_create_ring(engine, 32 * PAGE_SIZE);
if (IS_ERR(ring)) {
- ret = PTR_ERR(ring);
- goto error;
- }
-
- if (HWS_NEEDS_PHYSICAL(dev_priv)) {
- WARN_ON(engine->id != RCS);
- ret = init_phys_status_page(engine);
- if (ret)
- goto error;
- } else {
- ret = init_status_page(engine);
- if (ret)
- goto error;
+ err = PTR_ERR(ring);
+ goto err_hws;
}
/* Ring wraparound at offset 0 sometimes hangs. No idea why. */
- ret = intel_ring_pin(ring, I915_GTT_PAGE_SIZE);
- if (ret) {
- intel_ring_free(ring);
- goto error;
- }
+ err = intel_ring_pin(ring, engine->i915, I915_GTT_PAGE_SIZE);
+ if (err)
+ goto err_ring;
+
+ GEM_BUG_ON(engine->buffer);
engine->buffer = ring;
return 0;
-error:
- intel_engine_cleanup(engine);
- return ret;
+err_ring:
+ intel_ring_free(ring);
+err_hws:
+ if (HWS_NEEDS_PHYSICAL(engine->i915))
+ cleanup_phys_status_page(engine);
+ else
+ cleanup_status_page(engine);
+err:
+ intel_engine_cleanup_common(engine);
+ return err;
}
void intel_engine_cleanup(struct intel_engine_cs *engine)
{
- struct drm_i915_private *dev_priv;
-
- dev_priv = engine->i915;
+ struct drm_i915_private *dev_priv = engine->i915;
- if (engine->buffer) {
- WARN_ON(INTEL_GEN(dev_priv) > 2 &&
- (I915_READ_MODE(engine) & MODE_IDLE) == 0);
+ WARN_ON(INTEL_GEN(dev_priv) > 2 &&
+ (I915_READ_MODE(engine) & MODE_IDLE) == 0);
- intel_ring_unpin(engine->buffer);
- intel_ring_free(engine->buffer);
- engine->buffer = NULL;
- }
+ intel_ring_unpin(engine->buffer);
+ intel_ring_free(engine->buffer);
if (engine->cleanup)
engine->cleanup(engine);
- if (HWS_NEEDS_PHYSICAL(dev_priv)) {
- WARN_ON(engine->id != RCS);
+ if (HWS_NEEDS_PHYSICAL(dev_priv))
cleanup_phys_status_page(engine);
- } else {
+ else
cleanup_status_page(engine);
- }
intel_engine_cleanup_common(engine);
- engine->i915 = NULL;
dev_priv->engine[engine->id] = NULL;
kfree(engine);
}
struct intel_engine_cs *engine;
enum intel_engine_id id;
- for_each_engine(engine, dev_priv, id) {
- engine->buffer->head = engine->buffer->tail;
- engine->buffer->last_retired_head = -1;
- }
+ /* Restart from the beginning of the rings for convenience */
+ for_each_engine(engine, dev_priv, id)
+ intel_ring_reset(engine->buffer, 0);
}
static int ring_request_alloc(struct drm_i915_gem_request *request)
{
- int ret;
+ u32 *cs;
GEM_BUG_ON(!request->ctx->engine[request->engine->id].pin_count);
*/
request->reserved_space += LEGACY_REQUEST_SIZE;
- GEM_BUG_ON(!request->engine->buffer);
- request->ring = request->engine->buffer;
-
- ret = intel_ring_begin(request, 0);
- if (ret)
- return ret;
+ cs = intel_ring_begin(request, 0);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
request->reserved_space -= LEGACY_REQUEST_SIZE;
return 0;
}
-static int wait_for_space(struct drm_i915_gem_request *req, int bytes)
+static noinline int wait_for_space(struct drm_i915_gem_request *req,
+ unsigned int bytes)
{
struct intel_ring *ring = req->ring;
struct drm_i915_gem_request *target;
lockdep_assert_held(&req->i915->drm.struct_mutex);
- intel_ring_update_space(ring);
- if (ring->space >= bytes)
+ if (intel_ring_update_space(ring) >= bytes)
return 0;
/*
GEM_BUG_ON(!req->reserved_space);
list_for_each_entry(target, &ring->request_list, ring_link) {
- unsigned space;
-
/* Would completion of this request free enough space? */
- space = __intel_ring_space(target->postfix, ring->tail,
- ring->size);
- if (space >= bytes)
+ if (bytes <= __intel_ring_space(target->postfix,
+ ring->emit, ring->size))
break;
}
return 0;
}
-int intel_ring_begin(struct drm_i915_gem_request *req, int num_dwords)
+u32 *intel_ring_begin(struct drm_i915_gem_request *req,
+ unsigned int num_dwords)
{
struct intel_ring *ring = req->ring;
- int remain_actual = ring->size - ring->tail;
- int remain_usable = ring->effective_size - ring->tail;
- int bytes = num_dwords * sizeof(u32);
- int total_bytes, wait_bytes;
- bool need_wrap = false;
+ const unsigned int remain_usable = ring->effective_size - ring->emit;
+ const unsigned int bytes = num_dwords * sizeof(u32);
+ unsigned int need_wrap = 0;
+ unsigned int total_bytes;
+ u32 *cs;
total_bytes = bytes + req->reserved_space;
+ GEM_BUG_ON(total_bytes > ring->effective_size);
- if (unlikely(bytes > remain_usable)) {
- /*
- * Not enough space for the basic request. So need to flush
- * out the remainder and then wait for base + reserved.
- */
- wait_bytes = remain_actual + total_bytes;
- need_wrap = true;
- } else if (unlikely(total_bytes > remain_usable)) {
- /*
- * The base request will fit but the reserved space
- * falls off the end. So we don't need an immediate wrap
- * and only need to effectively wait for the reserved
- * size space from the start of ringbuffer.
- */
- wait_bytes = remain_actual + req->reserved_space;
- } else {
- /* No wrapping required, just waiting. */
- wait_bytes = total_bytes;
+ if (unlikely(total_bytes > remain_usable)) {
+ const int remain_actual = ring->size - ring->emit;
+
+ if (bytes > remain_usable) {
+ /*
+ * Not enough space for the basic request. So need to
+ * flush out the remainder and then wait for
+ * base + reserved.
+ */
+ total_bytes += remain_actual;
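+ /*
+ * Setting the low bit keeps need_wrap non-zero even if
+ * remain_actual is 0; the bit is masked off again before
+ * the MI_NOOP fill below.
+ */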
+ need_wrap = remain_actual | 1;
+ } else {
+ /*
+ * The base request will fit but the reserved space
+ * falls off the end. So we don't need an immediate
+ * wrap and only need to effectively wait for the
+ * reserved size from the start of ringbuffer.
+ */
+ total_bytes = req->reserved_space + remain_actual;
+ }
}
- if (wait_bytes > ring->space) {
- int ret = wait_for_space(req, wait_bytes);
+ if (unlikely(total_bytes > ring->space)) {
+ int ret = wait_for_space(req, total_bytes);
if (unlikely(ret))
- return ret;
+ return ERR_PTR(ret);
}
if (unlikely(need_wrap)) {
- GEM_BUG_ON(remain_actual > ring->space);
- GEM_BUG_ON(ring->tail + remain_actual > ring->size);
+ need_wrap &= ~1;
+ GEM_BUG_ON(need_wrap > ring->space);
+ GEM_BUG_ON(ring->emit + need_wrap > ring->size);
/* Fill the tail with MI_NOOP */
- memset(ring->vaddr + ring->tail, 0, remain_actual);
- ring->tail = 0;
- ring->space -= remain_actual;
+ memset(ring->vaddr + ring->emit, 0, need_wrap);
+ ring->emit = 0;
+ ring->space -= need_wrap;
}
+ GEM_BUG_ON(ring->emit > ring->size - bytes);
+ GEM_BUG_ON(ring->space < bytes);
+ cs = ring->vaddr + ring->emit;
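+ /*
+ * In debug builds, poison the reserved dwords so that any left
+ * unwritten by the caller stand out.
+ */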
+ GEM_DEBUG_EXEC(memset(cs, POISON_INUSE, bytes));
+ ring->emit += bytes;
ring->space -= bytes;
- GEM_BUG_ON(ring->space < 0);
- return 0;
+
+ return cs;
}
/* Align the ring tail to a cacheline boundary */
int intel_ring_cacheline_align(struct drm_i915_gem_request *req)
{
- struct intel_ring *ring = req->ring;
int num_dwords =
- (ring->tail & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
- int ret;
+ (req->ring->emit & (CACHELINE_BYTES - 1)) / sizeof(uint32_t);
+ u32 *cs;
if (num_dwords == 0)
return 0;
num_dwords = CACHELINE_BYTES / sizeof(uint32_t) - num_dwords;
- ret = intel_ring_begin(req, num_dwords);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, num_dwords);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
while (num_dwords--)
- intel_ring_emit(ring, MI_NOOP);
+ *cs++ = MI_NOOP;
- intel_ring_advance(ring);
+ intel_ring_advance(req, cs);
return 0;
}
I915_WRITE64_FW(GEN6_BSD_RNCID, 0x0);
/* Wait for the ring not to be idle, i.e. for it to wake up. */
- if (intel_wait_for_register_fw(dev_priv,
- GEN6_BSD_SLEEP_PSMI_CONTROL,
- GEN6_BSD_SLEEP_INDICATOR,
- 0,
- 50))
+ if (__intel_wait_for_register_fw(dev_priv,
+ GEN6_BSD_SLEEP_PSMI_CONTROL,
+ GEN6_BSD_SLEEP_INDICATOR,
+ 0,
+ 1000, 0, NULL))
DRM_ERROR("timed out waiting for the BSD ring to wake up\n");
/* Now that the ring is fully powered up, update the tail */
static int gen6_bsd_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- struct intel_ring *ring = req->ring;
- uint32_t cmd;
- int ret;
+ u32 cmd, *cs;
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
cmd = MI_FLUSH_DW;
if (INTEL_GEN(req->i915) >= 8)
if (mode & EMIT_INVALIDATE)
cmd |= MI_INVALIDATE_TLB | MI_INVALIDATE_BSD;
- intel_ring_emit(ring, cmd);
- intel_ring_emit(ring, I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+ *cs++ = cmd;
+ *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
if (INTEL_GEN(req->i915) >= 8) {
- intel_ring_emit(ring, 0); /* upper addr */
- intel_ring_emit(ring, 0); /* value */
+ *cs++ = 0; /* upper addr */
+ *cs++ = 0; /* value */
} else {
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
}
- intel_ring_advance(ring);
+ intel_ring_advance(req, cs);
return 0;
}
u64 offset, u32 len,
unsigned int dispatch_flags)
{
- struct intel_ring *ring = req->ring;
bool ppgtt = USES_PPGTT(req->i915) &&
!(dispatch_flags & I915_DISPATCH_SECURE);
- int ret;
+ u32 *cs;
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
/* FIXME(BDW): Address space and security selectors. */
- intel_ring_emit(ring, MI_BATCH_BUFFER_START_GEN8 | (ppgtt<<8) |
- (dispatch_flags & I915_DISPATCH_RS ?
- MI_BATCH_RESOURCE_STREAMER : 0));
- intel_ring_emit(ring, lower_32_bits(offset));
- intel_ring_emit(ring, upper_32_bits(offset));
- intel_ring_emit(ring, MI_NOOP);
- intel_ring_advance(ring);
+ *cs++ = MI_BATCH_BUFFER_START_GEN8 | (ppgtt << 8) |
+ (dispatch_flags & I915_DISPATCH_RS ?
+ MI_BATCH_RESOURCE_STREAMER : 0);
+ *cs++ = lower_32_bits(offset);
+ *cs++ = upper_32_bits(offset);
+ *cs++ = MI_NOOP;
+ intel_ring_advance(req, cs);
return 0;
}
u64 offset, u32 len,
unsigned int dispatch_flags)
{
- struct intel_ring *ring = req->ring;
- int ret;
+ u32 *cs;
- ret = intel_ring_begin(req, 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring,
- MI_BATCH_BUFFER_START |
- (dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
- (dispatch_flags & I915_DISPATCH_RS ?
- MI_BATCH_RESOURCE_STREAMER : 0));
+ *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
+ 0 : MI_BATCH_PPGTT_HSW | MI_BATCH_NON_SECURE_HSW) |
+ (dispatch_flags & I915_DISPATCH_RS ?
+ MI_BATCH_RESOURCE_STREAMER : 0);
/* bit0-7 is the length on GEN6+ */
- intel_ring_emit(ring, offset);
- intel_ring_advance(ring);
+ *cs++ = offset;
+ intel_ring_advance(req, cs);
return 0;
}
u64 offset, u32 len,
unsigned int dispatch_flags)
{
- struct intel_ring *ring = req->ring;
- int ret;
+ u32 *cs;
- ret = intel_ring_begin(req, 2);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 2);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
- intel_ring_emit(ring,
- MI_BATCH_BUFFER_START |
- (dispatch_flags & I915_DISPATCH_SECURE ?
- 0 : MI_BATCH_NON_SECURE_I965));
+ *cs++ = MI_BATCH_BUFFER_START | (dispatch_flags & I915_DISPATCH_SECURE ?
+ 0 : MI_BATCH_NON_SECURE_I965);
/* bit0-7 is the length on GEN6+ */
- intel_ring_emit(ring, offset);
- intel_ring_advance(ring);
+ *cs++ = offset;
+ intel_ring_advance(req, cs);
return 0;
}
static int gen6_ring_flush(struct drm_i915_gem_request *req, u32 mode)
{
- struct intel_ring *ring = req->ring;
- uint32_t cmd;
- int ret;
+ u32 cmd, *cs;
- ret = intel_ring_begin(req, 4);
- if (ret)
- return ret;
+ cs = intel_ring_begin(req, 4);
+ if (IS_ERR(cs))
+ return PTR_ERR(cs);
cmd = MI_FLUSH_DW;
if (INTEL_GEN(req->i915) >= 8)
*/
if (mode & EMIT_INVALIDATE)
cmd |= MI_INVALIDATE_TLB;
- intel_ring_emit(ring, cmd);
- intel_ring_emit(ring,
- I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT);
+ *cs++ = cmd;
+ *cs++ = I915_GEM_HWS_SCRATCH_ADDR | MI_FLUSH_DW_USE_GTT;
if (INTEL_GEN(req->i915) >= 8) {
- intel_ring_emit(ring, 0); /* upper addr */
- intel_ring_emit(ring, 0); /* value */
+ *cs++ = 0; /* upper addr */
+ *cs++ = 0; /* value */
} else {
- intel_ring_emit(ring, 0);
- intel_ring_emit(ring, MI_NOOP);
+ *cs++ = 0;
+ *cs++ = MI_NOOP;
}
- intel_ring_advance(ring);
+ intel_ring_advance(req, cs);
return 0;
}
}
}
+static void i9xx_set_default_submission(struct intel_engine_cs *engine)
+{
+ engine->submit_request = i9xx_submit_request;
+}
+
+static void gen6_bsd_set_default_submission(struct intel_engine_cs *engine)
+{
+ engine->submit_request = gen6_bsd_submit_request;
+}
+
static void intel_ring_default_vfuncs(struct drm_i915_private *dev_priv,
struct intel_engine_cs *engine)
{
engine->emit_breadcrumb = gen6_sema_emit_breadcrumb;
- num_rings = hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
+ num_rings = INTEL_INFO(dev_priv)->num_rings - 1;
if (INTEL_GEN(dev_priv) >= 8) {
engine->emit_breadcrumb_sz += num_rings * 6;
} else {
engine->emit_breadcrumb_sz++;
}
}
- engine->submit_request = i9xx_submit_request;
+
+ engine->set_default_submission = i9xx_set_default_submission;
if (INTEL_GEN(dev_priv) >= 8)
engine->emit_bb_start = gen8_emit_bb_start;
engine->semaphore.signal = gen8_rcs_signal;
- num_rings =
- hweight32(INTEL_INFO(dev_priv)->ring_mask) - 1;
- engine->emit_breadcrumb_sz += num_rings * 6;
+ num_rings = INTEL_INFO(dev_priv)->num_rings - 1;
+ engine->emit_breadcrumb_sz += num_rings * 8;
}
} else if (INTEL_GEN(dev_priv) >= 6) {
engine->init_context = intel_rcs_ctx_init;
if (INTEL_GEN(dev_priv) >= 6) {
/* gen6 bsd needs a special wa for tail updates */
if (IS_GEN6(dev_priv))
- engine->submit_request = gen6_bsd_submit_request;
+ engine->set_default_submission = gen6_bsd_set_default_submission;
engine->emit_flush = gen6_bsd_ring_flush;
if (INTEL_GEN(dev_priv) < 8)
engine->irq_enable_mask = GT_BSD_USER_INTERRUPT;
return intel_init_ring_buffer(engine);
}
-/**
- * Initialize the second BSD ring (eg. Broadwell GT3, Skylake GT3)
- */
-int intel_init_bsd2_ring_buffer(struct intel_engine_cs *engine)
-{
- struct drm_i915_private *dev_priv = engine->i915;
-
- intel_ring_default_vfuncs(dev_priv, engine);
-
- engine->emit_flush = gen6_bsd_ring_flush;
-
- return intel_init_ring_buffer(engine);
-}
-
int intel_init_blt_ring_buffer(struct intel_engine_cs *engine)
{
struct drm_i915_private *dev_priv = engine->i915;