drm/i915: Don't clflush before release phys object
author Chris Wilson <chris@chris-wilson.co.uk>
Fri, 23 Dec 2016 14:57:57 +0000 (14:57 +0000)
committer Jani Nikula <jani.nikula@intel.com>
Tue, 3 Jan 2017 09:41:38 +0000 (11:41 +0200)
When we tear down the backing storage for the phys object, we copy from
the coherent contiguous block back to the shmemfs object, clflushing as
we go. Trying to clflush the invalid sg beforehand simply oopses, and
would be redundant in any case (the block is already coherent, and it is
clflushed again during the copy).
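
For reference, the copy-back in i915_gem_object_put_pages_phys() looks
roughly like the condensed sketch below. This is not the verbatim kernel
code: error handling and the madvise bookkeeping are trimmed, and the
coherent block's kernel address is assumed to be reachable via
obj->phys_handle->vaddr. shmem_read_mapping_page(),
drm_clflush_virt_range() and the page helpers are existing kernel/DRM
interfaces. The point is that the CPU cache is flushed page by page as
the data is copied out, so an extra drm_clflush_sg() on the phys
object's sg table beforehand gains nothing and only risks the oops
fixed here.

	/*
	 * Condensed sketch of the dirty copy-back from the coherent phys
	 * block to the shmemfs pages; coherency is handled inline, one
	 * page at a time, during the copy itself.
	 */
	if (obj->mm.dirty) {
		struct address_space *mapping = obj->base.filp->f_mapping;
		char *vaddr = obj->phys_handle->vaddr; /* contiguous coherent block */
		int i;

		for (i = 0; i < obj->base.size / PAGE_SIZE; i++) {
			struct page *page = shmem_read_mapping_page(mapping, i);
			char *dst;

			if (IS_ERR(page))
				continue;

			dst = kmap_atomic(page);
			drm_clflush_virt_range(vaddr, PAGE_SIZE); /* flush as we go */
			memcpy(dst, vaddr, PAGE_SIZE);
			kunmap_atomic(dst);

			set_page_dirty(page);
			mark_page_accessed(page);
			put_page(page);
			vaddr += PAGE_SIZE;
		}
		obj->mm.dirty = false;
	}

With the needs_clflush flag added below, the shared release helper skips
the up-front drm_clflush_sg() on this path, while the gtt path keeps it.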

Reported-by: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: <drm-intel-fixes@lists.freedesktop.org>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20161223145804.6605-3-chris@chris-wilson.co.uk
(cherry picked from commit e5facdf9644f4490520e0489a0252e8feaba3744)
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
drivers/gpu/drm/i915/i915_gem.c

index 4a31b7a891ecaf3e2732c9f2d6ce62fa633419f6..30a1322c867757786cf6e861d5bd5b6372549205 100644
@@ -244,14 +244,16 @@ err_phys:
 
 static void
 __i915_gem_object_release_shmem(struct drm_i915_gem_object *obj,
-                               struct sg_table *pages)
+                               struct sg_table *pages,
+                               bool needs_clflush)
 {
        GEM_BUG_ON(obj->mm.madv == __I915_MADV_PURGED);
 
        if (obj->mm.madv == I915_MADV_DONTNEED)
                obj->mm.dirty = false;
 
-       if ((obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
+       if (needs_clflush &&
+           (obj->base.read_domains & I915_GEM_DOMAIN_CPU) == 0 &&
            !cpu_cache_is_coherent(obj->base.dev, obj->cache_level))
                drm_clflush_sg(pages);
 
@@ -263,7 +265,7 @@ static void
 i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
                               struct sg_table *pages)
 {
-       __i915_gem_object_release_shmem(obj, pages);
+       __i915_gem_object_release_shmem(obj, pages, false);
 
        if (obj->mm.dirty) {
                struct address_space *mapping = obj->base.filp->f_mapping;
@@ -2231,7 +2233,7 @@ i915_gem_object_put_pages_gtt(struct drm_i915_gem_object *obj,
        struct sgt_iter sgt_iter;
        struct page *page;
 
-       __i915_gem_object_release_shmem(obj, pages);
+       __i915_gem_object_release_shmem(obj, pages, true);
 
        i915_gem_gtt_finish_pages(obj, pages);