X-Git-Url: https://git.kernel.dk/?a=blobdiff_plain;f=drivers%2Fgpu%2Fdrm%2Fi915%2Fintel_ringbuffer.c;h=bfb5d75cc1c5543e8b3b903a4fc9caa2e7ab756f;hb=61a563a2a6ada193a022666b51ec8bb64112efc5;hp=31b36c5ac8941e844cd9f995b0c4e575219fc8ea;hpb=208937fdcfe780c91a94eb95a7082455f4aea8a4;p=linux-2.6-block.git

diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 31b36c5ac894..bfb5d75cc1c5 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -440,15 +440,17 @@ static int init_ring_common(struct intel_ring_buffer *ring)
 
 	gen6_gt_force_wake_get(dev_priv, FORCEWAKE_ALL);
 
-	if (I915_NEED_GFX_HWS(dev))
-		intel_ring_setup_status_page(ring);
-	else
-		ring_setup_phys_status_page(ring);
-
 	/* Stop the ring if it's running. */
 	I915_WRITE_CTL(ring, 0);
 	I915_WRITE_HEAD(ring, 0);
 	ring->write_tail(ring, 0);
+	if (wait_for_atomic((I915_READ_MODE(ring) & MODE_IDLE) != 0, 1000))
+		DRM_ERROR("%s :timed out trying to stop ring\n", ring->name);
+
+	if (I915_NEED_GFX_HWS(dev))
+		intel_ring_setup_status_page(ring);
+	else
+		ring_setup_phys_status_page(ring);
 
 	head = I915_READ_HEAD(ring) & HEAD_ADDR;
 
@@ -531,9 +533,11 @@ init_pipe_control(struct intel_ring_buffer *ring)
 		goto err;
 	}
 
-	i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
+	ret = i915_gem_object_set_cache_level(ring->scratch.obj, I915_CACHE_LLC);
+	if (ret)
+		goto err_unref;
 
-	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, true, false);
+	ret = i915_gem_obj_ggtt_pin(ring->scratch.obj, 4096, 0);
 	if (ret)
 		goto err_unref;
 
@@ -549,7 +553,7 @@ init_pipe_control(struct intel_ring_buffer *ring)
 	return 0;
 
 err_unpin:
-	i915_gem_object_unpin(ring->scratch.obj);
+	i915_gem_object_ggtt_unpin(ring->scratch.obj);
 err_unref:
 	drm_gem_object_unreference(&ring->scratch.obj->base);
 err:
@@ -562,14 +566,15 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 	struct drm_i915_private *dev_priv = dev->dev_private;
 	int ret = init_ring_common(ring);
 
-	if (INTEL_INFO(dev)->gen > 3)
+	/* WaTimedSingleVertexDispatch:cl,bw,ctg,elk,ilk,snb */
+	if (INTEL_INFO(dev)->gen >= 4 && INTEL_INFO(dev)->gen < 7)
 		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(VS_TIMER_DISPATCH));
 
 	/* We need to disable the AsyncFlip performance optimisations in order
 	 * to use MI_WAIT_FOR_EVENT within the CS. It should already be
 	 * programmed to '1' on all products.
 	 *
-	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv
+	 * WaDisableAsyncFlipPerfMode:snb,ivb,hsw,vlv,bdw
 	 */
 	if (INTEL_INFO(dev)->gen >= 6)
 		I915_WRITE(MI_MODE, _MASKED_BIT_ENABLE(ASYNC_FLIP_PERF_DISABLE));
@@ -625,7 +630,7 @@ static void render_ring_cleanup(struct intel_ring_buffer *ring)
 
 	if (INTEL_INFO(dev)->gen >= 5) {
 		kunmap(sg_page(ring->scratch.obj->pages->sgl));
-		i915_gem_object_unpin(ring->scratch.obj);
+		i915_gem_object_ggtt_unpin(ring->scratch.obj);
 	}
 
 	drm_gem_object_unreference(&ring->scratch.obj->base);
@@ -977,9 +982,19 @@ void intel_ring_setup_status_page(struct intel_ring_buffer *ring)
 	I915_WRITE(mmio, (u32)ring->status_page.gfx_addr);
 	POSTING_READ(mmio);
 
-	/* Flush the TLB for this page */
-	if (INTEL_INFO(dev)->gen >= 6) {
+	/*
+	 * Flush the TLB for this page
+	 *
+	 * FIXME: These two bits have disappeared on gen8, so a question
+	 * arises: do we still need this and if so how should we go about
+	 * invalidating the TLB?
+	 */
+	if (INTEL_INFO(dev)->gen >= 6 && INTEL_INFO(dev)->gen < 8) {
 		u32 reg = RING_INSTPM(ring->mmio_base);
+
+		/* ring should be idle before issuing a sync flush*/
+		WARN_ON((I915_READ_MODE(ring) & MODE_IDLE) == 0);
+
 		I915_WRITE(reg,
 			   _MASKED_BIT_ENABLE(INSTPM_TLB_INVALIDATE |
 					      INSTPM_SYNC_FLUSH));
@@ -1253,7 +1268,7 @@ static void cleanup_status_page(struct intel_ring_buffer *ring)
 		return;
 
 	kunmap(sg_page(obj->pages->sgl));
-	i915_gem_object_unpin(obj);
+	i915_gem_object_ggtt_unpin(obj);
 	drm_gem_object_unreference(&obj->base);
 	ring->status_page.obj = NULL;
 }
@@ -1271,12 +1286,13 @@ static int init_status_page(struct intel_ring_buffer *ring)
 		goto err;
 	}
 
-	i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+	ret = i915_gem_object_set_cache_level(obj, I915_CACHE_LLC);
+	if (ret)
+		goto err_unref;
 
-	ret = i915_gem_obj_ggtt_pin(obj, 4096, true, false);
-	if (ret != 0) {
+	ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
+	if (ret)
 		goto err_unref;
-	}
 
 	ring->status_page.gfx_addr = i915_gem_obj_ggtt_offset(obj);
 	ring->status_page.page_addr = kmap(sg_page(obj->pages->sgl));
@@ -1293,7 +1309,7 @@ static int init_status_page(struct intel_ring_buffer *ring)
 	return 0;
 
 err_unpin:
-	i915_gem_object_unpin(obj);
+	i915_gem_object_ggtt_unpin(obj);
 err_unref:
 	drm_gem_object_unreference(&obj->base);
 err:
@@ -1356,7 +1372,7 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 
 	ring->obj = obj;
 
-	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, true, false);
+	ret = i915_gem_obj_ggtt_pin(obj, PAGE_SIZE, PIN_MAPPABLE);
 	if (ret)
 		goto err_unref;
 
@@ -1385,12 +1401,14 @@ static int intel_init_ring_buffer(struct drm_device *dev,
 	if (IS_I830(ring->dev) || IS_845G(ring->dev))
 		ring->effective_size -= 128;
 
+	i915_cmd_parser_init_ring(ring);
+
 	return 0;
 
 err_unmap:
 	iounmap(ring->virtual_start);
 err_unpin:
-	i915_gem_object_unpin(obj);
+	i915_gem_object_ggtt_unpin(obj);
 err_unref:
 	drm_gem_object_unreference(&obj->base);
 	ring->obj = NULL;
@@ -1418,7 +1436,7 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 
 	iounmap(ring->virtual_start);
 
-	i915_gem_object_unpin(ring->obj);
+	i915_gem_object_ggtt_unpin(ring->obj);
 	drm_gem_object_unreference(&ring->obj->base);
 	ring->obj = NULL;
 	ring->preallocated_lazy_request = NULL;
@@ -1430,28 +1448,16 @@ void intel_cleanup_ring_buffer(struct intel_ring_buffer *ring)
 	cleanup_status_page(ring);
 }
 
-static int intel_ring_wait_seqno(struct intel_ring_buffer *ring, u32 seqno)
-{
-	int ret;
-
-	ret = i915_wait_seqno(ring, seqno);
-	if (!ret)
-		i915_gem_retire_requests_ring(ring);
-
-	return ret;
-}
-
 static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
 {
 	struct drm_i915_gem_request *request;
-	u32 seqno = 0;
+	u32 seqno = 0, tail;
 	int ret;
 
-	i915_gem_retire_requests_ring(ring);
-
 	if (ring->last_retired_head != -1) {
 		ring->head = ring->last_retired_head;
 		ring->last_retired_head = -1;
+
 		ring->space = ring_space(ring);
 		if (ring->space >= n)
 			return 0;
@@ -1468,6 +1474,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
 			space += ring->size;
 		if (space >= n) {
 			seqno = request->seqno;
+			tail = request->tail;
 			break;
 		}
 
@@ -1482,15 +1489,11 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
 	if (seqno == 0)
 		return -ENOSPC;
 
-	ret = intel_ring_wait_seqno(ring, seqno);
+	ret = i915_wait_seqno(ring, seqno);
 	if (ret)
 		return ret;
 
-	if (WARN_ON(ring->last_retired_head == -1))
-		return -ENOSPC;
-
-	ring->head = ring->last_retired_head;
-	ring->last_retired_head = -1;
+	ring->head = tail;
 	ring->space = ring_space(ring);
 	if (WARN_ON(ring->space < n))
 		return -ENOSPC;
@@ -1528,7 +1531,8 @@ static int ring_wait_for_space(struct intel_ring_buffer *ring, int n)
 			return 0;
 	}
 
-	if (dev->primary->master) {
+	if (!drm_core_check_feature(dev, DRIVER_MODESET) &&
+	    dev->primary->master) {
 		struct drm_i915_master_private *master_priv = dev->primary->master->driver_priv;
 		if (master_priv->sarea_priv)
 			master_priv->sarea_priv->perf_boxes |= I915_BOX_WAIT;
@@ -1954,7 +1958,7 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
 			return -ENOMEM;
 		}
 
-		ret = i915_gem_obj_ggtt_pin(obj, 0, true, false);
+		ret = i915_gem_obj_ggtt_pin(obj, 0, 0);
 		if (ret != 0) {
 			drm_gem_object_unreference(&obj->base);
 			DRM_ERROR("Failed to ping batch bo\n");
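---
Most of the mechanical churn in the hunks above is the switch from the old boolean-argument pin call, i915_gem_obj_ggtt_pin(obj, alignment, mappable, nonblocking), to the flags-based form i915_gem_obj_ggtt_pin(obj, alignment, flags) (0 or PIN_MAPPABLE in this file), paired with i915_gem_object_ggtt_unpin() on the error and cleanup paths. The sketch below only illustrates that calling convention; the helper names (example_pin_scratch, example_release_scratch) are hypothetical and not part of the patch, and error handling is reduced to the minimum.

/*
 * Illustrative sketch only: pin a GEM object into the global GTT with the
 * flags-based interface used above, then release it. Only the i915 calls
 * that appear in the hunks are used; the helpers themselves are made up.
 */
#include "i915_drv.h"

static int example_pin_scratch(struct drm_i915_gem_object *obj,
			       unsigned long *gtt_offset)
{
	int ret;

	/* old: i915_gem_obj_ggtt_pin(obj, 4096, true, false);
	 * new: alignment plus a flags word (0, PIN_MAPPABLE, ...) */
	ret = i915_gem_obj_ggtt_pin(obj, 4096, 0);
	if (ret)
		return ret;

	*gtt_offset = i915_gem_obj_ggtt_offset(obj);
	return 0;
}

static void example_release_scratch(struct drm_i915_gem_object *obj)
{
	/* GGTT pins are now dropped with the GGTT-specific unpin helper */
	i915_gem_object_ggtt_unpin(obj);
	drm_gem_object_unreference(&obj->base);
}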