drm/i915: Embed the io-mapping struct inside drm_i915_private
author: Chris Wilson <chris@chris-wilson.co.uk>
Fri, 19 Aug 2016 15:54:27 +0000 (16:54 +0100)
committer: Chris Wilson <chris@chris-wilson.co.uk>
Fri, 19 Aug 2016 16:13:35 +0000 (17:13 +0100)
As io_mapping.h now always allocates the struct, we can avoid that
allocation and extra pointer dance by embedding the struct inside
drm_i915_private.

Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Link: http://patchwork.freedesktop.org/patch/msgid/20160819155428.1670-5-chris@chris-wilson.co.uk
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/i915_gem_gtt.c
drivers/gpu/drm/i915/i915_gem_gtt.h
drivers/gpu/drm/i915/i915_gpu_error.c
drivers/gpu/drm/i915/intel_overlay.c

index 0e1f5dde2e872016ac3f27bf50023e0d53445931..5398af7f7580bdb4399b98e47e58e40836fc50da 100644 (file)
@@ -891,7 +891,7 @@ i915_gem_gtt_pread(struct drm_device *dev,
                 * and write to user memory which may result into page
                 * faults, and so we cannot perform this under struct_mutex.
                 */
-               if (slow_user_access(ggtt->mappable, page_base,
+               if (slow_user_access(&ggtt->mappable, page_base,
                                     page_offset, user_data,
                                     page_length, false)) {
                        ret = -EFAULT;
@@ -1187,11 +1187,11 @@ i915_gem_gtt_pwrite_fast(struct drm_i915_private *i915,
                 * If the object is non-shmem backed, we retry again with the
                 * path that handles page fault.
                 */
-               if (fast_user_write(ggtt->mappable, page_base,
+               if (fast_user_write(&ggtt->mappable, page_base,
                                    page_offset, user_data, page_length)) {
                        hit_slow_path = true;
                        mutex_unlock(&dev->struct_mutex);
-                       if (slow_user_access(ggtt->mappable,
+                       if (slow_user_access(&ggtt->mappable,
                                             page_base,
                                             page_offset, user_data,
                                             page_length, true)) {
index 4192066ff60e2de73530122608b7988e19536888..601156c353cc3042578f5c97e57156f79b4e4378 100644 (file)
@@ -474,7 +474,7 @@ static void *reloc_iomap(struct drm_i915_gem_object *obj,
                offset += page << PAGE_SHIFT;
        }
 
-       vaddr = io_mapping_map_atomic_wc(cache->i915->ggtt.mappable, offset);
+       vaddr = io_mapping_map_atomic_wc(&cache->i915->ggtt.mappable, offset);
        cache->page = page;
        cache->vaddr = (unsigned long)vaddr;
 
index a18363a0d8c599adb3de3509d43982633a149246..b90fdcee992ac8066fed6b752e5a05d59997ac6a 100644 (file)
@@ -2794,7 +2794,6 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
 
        if (dev_priv->mm.aliasing_ppgtt) {
                struct i915_hw_ppgtt *ppgtt = dev_priv->mm.aliasing_ppgtt;
-
                ppgtt->base.cleanup(&ppgtt->base);
                kfree(ppgtt);
        }
@@ -2811,7 +2810,7 @@ void i915_ggtt_cleanup_hw(struct drm_i915_private *dev_priv)
        ggtt->base.cleanup(&ggtt->base);
 
        arch_phys_wc_del(ggtt->mtrr);
-       io_mapping_free(ggtt->mappable);
+       io_mapping_fini(&ggtt->mappable);
 }
 
 static unsigned int gen6_get_total_gtt_size(u16 snb_gmch_ctl)
@@ -3209,9 +3208,9 @@ int i915_ggtt_init_hw(struct drm_i915_private *dev_priv)
        if (!HAS_LLC(dev_priv))
                ggtt->base.mm.color_adjust = i915_gtt_color_adjust;
 
-       ggtt->mappable =
-               io_mapping_create_wc(ggtt->mappable_base, ggtt->mappable_end);
-       if (!ggtt->mappable) {
+       if (!io_mapping_init_wc(&dev_priv->ggtt.mappable,
+                               dev_priv->ggtt.mappable_base,
+                               dev_priv->ggtt.mappable_end)) {
                ret = -EIO;
                goto out_gtt_cleanup;
        }
@@ -3681,7 +3680,7 @@ void __iomem *i915_vma_pin_iomap(struct i915_vma *vma)
 
        ptr = vma->iomap;
        if (ptr == NULL) {
-               ptr = io_mapping_map_wc(i915_vm_to_ggtt(vma->vm)->mappable,
+               ptr = io_mapping_map_wc(&i915_vm_to_ggtt(vma->vm)->mappable,
                                        vma->node.start,
                                        vma->node.size);
                if (ptr == NULL)
index a15cea73f7290997abc668eb4584070304b87340..a9aec25535acf41f2327a1d8dc3c57a6887364df 100644 (file)
@@ -439,13 +439,13 @@ struct i915_address_space {
  */
 struct i915_ggtt {
        struct i915_address_space base;
+       struct io_mapping mappable;     /* Mapping to our CPU mappable region */
 
        size_t stolen_size;             /* Total size of stolen memory */
        size_t stolen_usable_size;      /* Total size minus BIOS reserved */
        size_t stolen_reserved_base;
        size_t stolen_reserved_size;
        u64 mappable_end;               /* End offset that we can CPU map */
-       struct io_mapping *mappable;    /* Mapping to our CPU mappable region */
        phys_addr_t mappable_base;      /* PA of our GMADR */
 
        /** "Graphics Stolen Memory" holds the global PTEs */
index 84dd5bc06db3ca570ca0f8a1fb69deb82c9ac50b..41ec7a183c73d9249a47c356ac2212ccb469c426 100644 (file)
@@ -729,7 +729,7 @@ i915_error_object_create(struct drm_i915_private *dev_priv,
                         * captures what the GPU read.
                         */
 
-                       s = io_mapping_map_atomic_wc(ggtt->mappable,
+                       s = io_mapping_map_atomic_wc(&ggtt->mappable,
                                                     reloc_offset);
                        memcpy_fromio(d, s, PAGE_SIZE);
                        io_mapping_unmap_atomic(s);
index 3cf8d02064a8035503c21c2fee4f41280a7559c8..a24bc8c7889f04ce3f3d7e2351404e43f777ceef 100644 (file)
@@ -196,7 +196,7 @@ intel_overlay_map_regs(struct intel_overlay *overlay)
        if (OVERLAY_NEEDS_PHYSICAL(dev_priv))
                regs = (struct overlay_registers __iomem *)overlay->reg_bo->phys_handle->vaddr;
        else
-               regs = io_mapping_map_wc(dev_priv->ggtt.mappable,
+               regs = io_mapping_map_wc(&dev_priv->ggtt.mappable,
                                         overlay->flip_addr,
                                         PAGE_SIZE);
 
@@ -1489,7 +1489,7 @@ intel_overlay_map_regs_atomic(struct intel_overlay *overlay)
                regs = (struct overlay_registers __iomem *)
                        overlay->reg_bo->phys_handle->vaddr;
        else
-               regs = io_mapping_map_atomic_wc(dev_priv->ggtt.mappable,
+               regs = io_mapping_map_atomic_wc(&dev_priv->ggtt.mappable,
                                                overlay->flip_addr);
 
        return regs;