drm/i915: Unconditionally flush any chipset buffers before execbuf
author Chris Wilson <chris@chris-wilson.co.uk>
Thu, 18 Aug 2016 16:16:40 +0000 (17:16 +0100)
committer Jani Nikula <jani.nikula@intel.com>
Mon, 22 Aug 2016 13:04:50 +0000 (16:04 +0300)
If userspace is asynchronously streaming into the batch or other
execobjects, we may not flush those writes along with a change in cache
domain (as there is no change). Those writes may therefore end up in
internal chipset buffers and not be visible to the GPU upon execution. We
must issue a flush command, or else we encounter incoherency in the
batchbuffers and the GPU quite regularly executes invalid commands
(i.e. hangs).
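
As a rough illustration of the scenario, here is a hypothetical userspace
sketch (error handling and capability checks omitted; the write-combining
mmap is the uAPI added by the commit named in the Fixes: tag below):

	#include <fcntl.h>
	#include <stdint.h>
	#include <xf86drm.h>
	#include <drm/i915_drm.h>

	#define MI_BATCH_BUFFER_END (0xa << 23)	/* command value per the bspec */

	int main(void)
	{
		int fd = open("/dev/dri/card0", O_RDWR);

		struct drm_i915_gem_create create = { .size = 4096 };
		drmIoctl(fd, DRM_IOCTL_I915_GEM_CREATE, &create);

		struct drm_i915_gem_mmap mmap_arg = {
			.handle = create.handle,
			.size   = 4096,
			.flags  = I915_MMAP_WC,	/* write-combining CPU mapping */
		};
		drmIoctl(fd, DRM_IOCTL_I915_GEM_MMAP, &mmap_arg);
		uint32_t *cs = (uint32_t *)(uintptr_t)mmap_arg.addr_ptr;

		struct drm_i915_gem_exec_object2 obj = { .handle = create.handle };
		struct drm_i915_gem_execbuffer2 execbuf = {
			.buffers_ptr  = (uintptr_t)&obj,
			.buffer_count = 1,
			.batch_len    = 8,
		};

		for (int i = 0; i < 2; i++) {
			/* Stream commands straight through the WC mapping... */
			cs[0] = MI_BATCH_BUFFER_END;
			cs[1] = 0;	/* MI_NOOP padding */

			/* ...and submit.  From the second iteration onwards the
			 * object is no longer in the CPU write domain, so the
			 * kernel sees no cache-domain change and (before this
			 * patch) issued no chipset flush; the writes above may
			 * still sit in the CPU's write-combining buffers when
			 * the GPU executes the batch.
			 */
			drmIoctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2, &execbuf);
		}
		return 0;
	}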

v2: Throw a paranoid wmb() into the general flush so that we remain
consistent with before.
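
On x86, wmb() compiles to an sfence, which orders prior stores (including
write-combining stores) before anything that follows, so folding it into the
helper covers the streamed WC writes as well as the GTT write-domain case
that the old explicit wmb() handled. A minimal sketch of the guarantee a
caller now gets (hypothetical helper names, not the driver's actual
submission path):

	stream_commands_to_wc_map(batch_vaddr, cmds, len); /* CPU writes via WC */
	i915_gem_chipset_flush(i915);	/* wmb() orders/drains those writes; on
					 * pre-gen6 also intel_gtt_chipset_flush() */
	queue_request_to_gpu(req);	/* only now may the GPU read the batch */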

Bugzilla: https://bugs.freedesktop.org/show_bug.cgi?id=90841
Fixes: 1816f9236303 ("drm/i915: Support creation of unbound wc user...")
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Akash Goel <akash.goel@intel.com>
Cc: Daniel Vetter <daniel.vetter@ffwll.ch>
Cc: Tvrtko Ursulin <tvrtko.ursulin@linux.intel.com>
Tested-by: Matti Hämäläinen <ccr@tnsp.org>
Cc: stable@vger.kernel.org
Reviewed-by: Mika Kuoppala <mika.kuoppala@intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160818161718.27187-1-chris@chris-wilson.co.uk
(cherry picked from commit 600f436801deae65e48404847b61c89b4944e355)
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
drivers/gpu/drm/i915/i915_drv.h
drivers/gpu/drm/i915/i915_gem_execbuffer.c

drivers/gpu/drm/i915/i915_drv.h
index 20fe9d52e2562ffb256ced679638026cf234fd34..3ca674e9c105a02738139550702db669ca2ce33e 100644
@@ -3591,6 +3591,7 @@ int i915_gem_evict_vm(struct i915_address_space *vm, bool do_idle);
 /* belongs in i915_gem_gtt.h */
 static inline void i915_gem_chipset_flush(struct drm_i915_private *dev_priv)
 {
+       wmb();
        if (INTEL_GEN(dev_priv) < 6)
                intel_gtt_chipset_flush();
 }
drivers/gpu/drm/i915/i915_gem_execbuffer.c
index 1978633e7549ad4e7c7fd0f0d4eacfe4fce229c9..b35e5b6475b245a9061a5437c919e1716d409894 100644
@@ -943,8 +943,6 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
 {
        const unsigned other_rings = ~intel_engine_flag(req->engine);
        struct i915_vma *vma;
-       uint32_t flush_domains = 0;
-       bool flush_chipset = false;
        int ret;
 
        list_for_each_entry(vma, vmas, exec_list) {
@@ -957,16 +955,11 @@ i915_gem_execbuffer_move_to_gpu(struct drm_i915_gem_request *req,
                }
 
                if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
-                       flush_chipset |= i915_gem_clflush_object(obj, false);
-
-               flush_domains |= obj->base.write_domain;
+                       i915_gem_clflush_object(obj, false);
        }
 
-       if (flush_chipset)
-               i915_gem_chipset_flush(req->engine->i915);
-
-       if (flush_domains & I915_GEM_DOMAIN_GTT)
-               wmb();
+       /* Unconditionally flush any chipset caches (for streaming writes). */
+       i915_gem_chipset_flush(req->engine->i915);
 
        /* Unconditionally invalidate gpu caches and ensure that we do flush
         * any residual writes from the previous batch.
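
Pieced together from the two hunks above, the relevant part of
i915_gem_execbuffer_move_to_gpu() reads roughly as follows after the patch
(lines not shown in the diff are elided):

	list_for_each_entry(vma, vmas, exec_list) {
		struct drm_i915_gem_object *obj = vma->obj;

		/* ... wait for writes from other engines (elided) ... */

		if (obj->base.write_domain & I915_GEM_DOMAIN_CPU)
			i915_gem_clflush_object(obj, false);
	}

	/* Unconditionally flush any chipset caches (for streaming writes). */
	i915_gem_chipset_flush(req->engine->i915);

	/* Unconditionally invalidate gpu caches and ensure that we do flush
	 * any residual writes from the previous batch.
	 */
	/* ... emit the GPU cache invalidation for this request (elided) ... */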