Merge tag 'drm-intel-next-2012-12-21' of git://people.freedesktop.org/~danvet/drm...
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 2bd074ad6f54f6da9b6b6eef9f699e952fbb18cb..59e02691baf39aa3a25afb5694855ae4e4f4b782 100644 (file)
@@ -45,7 +45,7 @@ struct pipe_control {
 
 static inline int ring_space(struct intel_ring_buffer *ring)
 {
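+       /* Keep I915_RING_FREE_SPACE bytes of slack: the hardware requires
+        * that head and tail never collide on the same cacheline, so never
+        * report the ring as completely full.
+        */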
-       int space = (ring->head & HEAD_ADDR) - (ring->tail + 8);
+       int space = (ring->head & HEAD_ADDR) - (ring->tail + I915_RING_FREE_SPACE);
        if (space < 0)
                space += ring->size;
        return space;
@@ -547,9 +547,14 @@ static int init_render_ring(struct intel_ring_buffer *ring)
 
 static void render_ring_cleanup(struct intel_ring_buffer *ring)
 {
+       struct drm_device *dev = ring->dev;
+
        if (!ring->private)
                return;
 
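+       /* Drop the scratch batch bo set up for the CS tlb workaround. */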
+       if (HAS_BROKEN_CS_TLB(dev))
+               drm_gem_object_unreference(to_gem_object(ring->private));
+
        cleanup_pipe_control(ring);
 }
 
@@ -998,6 +1003,8 @@ i965_dispatch_execbuffer(struct intel_ring_buffer *ring,
        return 0;
 }
 
+/* Just a userspace ABI convention to limit the workaround batch bo to a reasonable size */
+#define I830_BATCH_LIMIT (256*1024)
 static int
 i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
                                u32 offset, u32 len,
@@ -1005,15 +1012,47 @@ i830_dispatch_execbuffer(struct intel_ring_buffer *ring,
 {
        int ret;
 
-       ret = intel_ring_begin(ring, 4);
-       if (ret)
-               return ret;
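+       /* Pinned batches sit at a stable address, so the broken CS tlb
+        * cannot bite and the batch may be executed in place.
+        */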
+       if (flags & I915_DISPATCH_PINNED) {
+               ret = intel_ring_begin(ring, 4);
+               if (ret)
+                       return ret;
 
-       intel_ring_emit(ring, MI_BATCH_BUFFER);
-       intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
-       intel_ring_emit(ring, offset + len - 8);
-       intel_ring_emit(ring, 0);
-       intel_ring_advance(ring);
+               intel_ring_emit(ring, MI_BATCH_BUFFER);
+               intel_ring_emit(ring, offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+               intel_ring_emit(ring, offset + len - 8);
+               intel_ring_emit(ring, MI_NOOP);
+               intel_ring_advance(ring);
+       } else {
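+               /* Unpinned batches are copied through the stable scratch bo
+                * allocated at ring init (intel_init_render_ring_buffer()).
+                */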
+               struct drm_i915_gem_object *obj = ring->private;
+               u32 cs_offset = obj->gtt_offset;
+
+               if (len > I830_BATCH_LIMIT)
+                       return -ENOSPC;
+
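+               /* 9 dwords for the copy blit + flush, 3 for MI_BATCH_BUFFER. */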
+               ret = intel_ring_begin(ring, 9+3);
+               if (ret)
+                       return ret;
+               /* Blit the batch (which now has all relocs applied) to the
+                * stable scratch bo area (so that the CS never stumbles over
+                * its tlb invalidation bug) ... */
+               intel_ring_emit(ring, XY_SRC_COPY_BLT_CMD |
+                               XY_SRC_COPY_BLT_WRITE_ALPHA |
+                               XY_SRC_COPY_BLT_WRITE_RGB);
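+               /* 32bpp rows of 4096 bytes: DIV_ROUND_UP(len, 4096) rows of
+                * 1024 pixels cover the whole batch.
+                */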
+               intel_ring_emit(ring, BLT_DEPTH_32 | BLT_ROP_GXCOPY | 4096);
+               intel_ring_emit(ring, 0);
+               intel_ring_emit(ring, (DIV_ROUND_UP(len, 4096) << 16) | 1024);
+               intel_ring_emit(ring, cs_offset);
+               intel_ring_emit(ring, 0);
+               intel_ring_emit(ring, 4096);
+               intel_ring_emit(ring, offset);
+               intel_ring_emit(ring, MI_FLUSH);
+
+               /* ... and execute it. */
+               intel_ring_emit(ring, MI_BATCH_BUFFER);
+               intel_ring_emit(ring, cs_offset | (flags & I915_DISPATCH_SECURE ? 0 : MI_BATCH_NON_SECURE));
+               intel_ring_emit(ring, cs_offset + len - 8);
+               intel_ring_advance(ring);
+       }
 
        return 0;
 }
@@ -1262,7 +1301,7 @@ static int intel_ring_wait_request(struct intel_ring_buffer *ring, int n)
                if (request->tail == -1)
                        continue;
 
-               space = request->tail - (ring->tail + 8);
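+               /* Same I915_RING_FREE_SPACE slack as in ring_space(). */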
+               space = request->tail - (ring->tail + I915_RING_FREE_SPACE);
                if (space < 0)
                        space += ring->size;
                if (space >= n) {
@@ -1655,6 +1694,27 @@ int intel_init_render_ring_buffer(struct drm_device *dev)
        ring->init = init_render_ring;
        ring->cleanup = render_ring_cleanup;
 
+       /* Workaround batchbuffer to combat CS tlb bug. */
+       if (HAS_BROKEN_CS_TLB(dev)) {
+               struct drm_i915_gem_object *obj;
+               int ret;
+
+               obj = i915_gem_alloc_object(dev, I830_BATCH_LIMIT);
+               if (obj == NULL) {
+                       DRM_ERROR("Failed to allocate batch bo\n");
+                       return -ENOMEM;
+               }
+
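+               /* No alignment requirement; mappable, and the pin may block. */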
+               ret = i915_gem_object_pin(obj, 0, true, false);
+               if (ret != 0) {
+                       drm_gem_object_unreference(&obj->base);
+                       DRM_ERROR("Failed to pin batch bo\n");
+                       return ret;
+               }
+
+               ring->private = obj;
+       }
+
        return intel_init_ring_buffer(dev, ring);
 }