dma-buf: Remove range-based flush
author Tiago Vignatti <tiago.vignatti@intel.com>
Tue, 22 Dec 2015 21:36:45 +0000 (19:36 -0200)
committer Daniel Vetter <daniel.vetter@ffwll.ch>
Tue, 9 Feb 2016 08:25:22 +0000 (09:25 +0100)
This patch removes range-based information used for optimizations in
begin_cpu_access and end_cpu_access.

We don't have any users or implementations of the range-based flush. There
seems to be consensus that if we ever want something like it again (or even
something more robust, using 2D/3D sub-range regions) we can do it via the
upcoming dma-buf sync ioctl.

Cc: Sumit Semwal <sumit.semwal@linaro.org>
Cc: Daniel Vetter <daniel.vetter@intel.com>
Signed-off-by: Tiago Vignatti <tiago.vignatti@intel.com>
Reviewed-by: Stéphane Marchesin <marcheu@chromium.org>
Signed-off-by: Daniel Vetter <daniel.vetter@ffwll.ch>
Link: http://patchwork.freedesktop.org/patch/msgid/1450820214-12509-3-git-send-email-tiago.vignatti@intel.com
Documentation/dma-buf-sharing.txt
drivers/dma-buf/dma-buf.c
drivers/gpu/drm/i915/i915_gem_dmabuf.c
drivers/gpu/drm/omapdrm/omap_gem_dmabuf.c
drivers/gpu/drm/udl/udl_fb.c
drivers/staging/android/ion/ion.c
drivers/staging/android/ion/ion_test.c
include/linux/dma-buf.h

index 480c8de3c2c44786174e112795f61b2381d3b09f..4f4a84b6903a64acbebe38ffcb67bbb25d6815e9 100644 (file)
@@ -257,17 +257,15 @@ Access to a dma_buf from the kernel context involves three steps:
 
    Interface:
       int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
-                                  size_t start, size_t len,
                                   enum dma_data_direction direction)
 
    This allows the exporter to ensure that the memory is actually available for
    cpu access - the exporter might need to allocate or swap-in and pin the
    backing storage. The exporter also needs to ensure that cpu access is
-   coherent for the given range and access direction. The range and access
-   direction can be used by the exporter to optimize the cache flushing, i.e.
-   access outside of the range or with a different direction (read instead of
-   write) might return stale or even bogus data (e.g. when the exporter needs to
-   copy the data to temporary storage).
+   coherent for the access direction. The direction can be used by the exporter
+   to optimize the cache flushing, i.e. access with a different direction (read
+   instead of write) might return stale or even bogus data (e.g. when the
+   exporter needs to copy the data to temporary storage).
 
    This step might fail, e.g. in oom conditions.
 
@@ -322,14 +320,13 @@ Access to a dma_buf from the kernel context involves three steps:
 
 3. Finish access
 
-   When the importer is done accessing the range specified in begin_cpu_access,
-   it needs to announce this to the exporter (to facilitate cache flushing and
-   unpinning of any pinned resources). The result of any dma_buf kmap calls
-   after end_cpu_access is undefined.
+   When the importer is done accessing the buffer from the CPU, it needs to
+   announce this to the exporter (to facilitate cache flushing and unpinning
+   of any pinned resources). The result of any dma_buf kmap calls after
+   end_cpu_access is undefined.
 
    Interface:
       void dma_buf_end_cpu_access(struct dma_buf *dma_buf,
-                                 size_t start, size_t len,
                                  enum dma_data_direction dir);
 
 
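For illustration only (this sketch is not part of the patch): a minimal
importer-side sequence against the new range-less interface. The helper name
example_read_first_page is hypothetical and error handling is trimmed; note
that coherency now always covers the whole buffer for the given direction.

     #include <linux/dma-buf.h>
     #include <linux/dma-direction.h>

     /* Hypothetical helper: read buffer contents through the CPU using
      * the range-less begin/end_cpu_access interface. */
     static int example_read_first_page(struct dma_buf *dmabuf)
     {
             void *vaddr;
             int ret;

             ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
             if (ret)
                     return ret; /* may fail, e.g. in oom conditions */

             vaddr = dma_buf_kmap(dmabuf, 0); /* map page 0 */
             if (vaddr) {
                     /* ... read the data ... */
                     dma_buf_kunmap(dmabuf, 0, vaddr);
             }

             dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
             return 0;
     }

Callers that already flushed the whole buffer anyway (e.g. udl below, which
passed 0 and the full object size) map directly onto this.
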
index 155c1464948e7ad02f885302754353725775b3be..b2ac13b4ddaa89d0c98fcecc31b4c3c4d1d4d264 100644 (file)
@@ -539,13 +539,11 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
  * preparations. Coherency is only guaranteed for the specified access
  * direction.
  * @dmabuf:    [in]    buffer to prepare cpu access for.
- * @start:     [in]    start of range for cpu access.
- * @len:       [in]    length of range for cpu access.
  * @direction: [in]    direction of cpu access.
  *
  * Can return negative error values, returns 0 on success.
  */
-int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+int dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                             enum dma_data_direction direction)
 {
        int ret = 0;
@@ -554,8 +552,7 @@ int dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
                return -EINVAL;
 
        if (dmabuf->ops->begin_cpu_access)
-               ret = dmabuf->ops->begin_cpu_access(dmabuf, start,
-                                                       len, direction);
+               ret = dmabuf->ops->begin_cpu_access(dmabuf, direction);
 
        return ret;
 }
@@ -567,19 +564,17 @@ EXPORT_SYMBOL_GPL(dma_buf_begin_cpu_access);
  * actions. Coherency is only guaranteed for the specified access
  * direction.
  * @dmabuf:    [in]    buffer to complete cpu access for.
- * @start:     [in]    start of range for cpu access.
- * @len:       [in]    length of range for cpu access.
  * @direction: [in]    direction of cpu access.
  *
  * This call must always succeed.
  */
-void dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start, size_t len,
+void dma_buf_end_cpu_access(struct dma_buf *dmabuf,
                            enum dma_data_direction direction)
 {
        WARN_ON(!dmabuf);
 
        if (dmabuf->ops->end_cpu_access)
-               dmabuf->ops->end_cpu_access(dmabuf, start, len, direction);
+               dmabuf->ops->end_cpu_access(dmabuf, direction);
 }
 EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
 
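The core simply forwards to the exporter's callback. As a hedged sketch (not
from this patch; the foo_* names and the foo_buffer type are made up), an
exporter implementing the new signatures might do a whole-buffer cache sync
keyed only on the direction:

     #include <linux/dma-buf.h>
     #include <linux/dma-mapping.h>

     struct foo_buffer {
             struct device *dev;
             struct sg_table *sgt;
     };

     static int foo_begin_cpu_access(struct dma_buf *dmabuf,
                                     enum dma_data_direction direction)
     {
             struct foo_buffer *buf = dmabuf->priv;

             /* Without a range, the whole buffer is made CPU-coherent. */
             dma_sync_sg_for_cpu(buf->dev, buf->sgt->sgl, buf->sgt->nents,
                                 direction);
             return 0;
     }

     static void foo_end_cpu_access(struct dma_buf *dmabuf,
                                    enum dma_data_direction direction)
     {
             struct foo_buffer *buf = dmabuf->priv;

             dma_sync_sg_for_device(buf->dev, buf->sgt->sgl, buf->sgt->nents,
                                    direction);
     }

     static const struct dma_buf_ops foo_dmabuf_ops = {
             /* .map_dma_buf, .unmap_dma_buf, .release, etc. omitted */
             .begin_cpu_access = foo_begin_cpu_access,
             .end_cpu_access   = foo_end_cpu_access,
     };

This is the trade-off the patch makes explicit: exporters flush conservatively
over the whole buffer, and finer-grained (even 2D/3D sub-range) flushing is
deferred to the upcoming dma-buf sync ioctl.
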
index e9c2bfd85b5268425ce9465ee2d23d99ee92c534..65ab2bd54af5fb40a4058390274a49df26a7dbfa 100644 (file)
@@ -196,7 +196,7 @@ static int i915_gem_dmabuf_mmap(struct dma_buf *dma_buf, struct vm_area_struct *
        return -EINVAL;
 }
 
-static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t length, enum dma_data_direction direction)
+static int i915_gem_begin_cpu_access(struct dma_buf *dma_buf, enum dma_data_direction direction)
 {
        struct drm_i915_gem_object *obj = dma_buf_to_obj(dma_buf);
        struct drm_device *dev = obj->base.dev;
index 27c297672076b058128e67e452b7ee1a47e7eb59..aebae1c2dab2ac8dac11222f31e12847b1273e20 100644 (file)
@@ -79,7 +79,7 @@ static void omap_gem_dmabuf_release(struct dma_buf *buffer)
 
 
 static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
-               size_t start, size_t len, enum dma_data_direction dir)
+               enum dma_data_direction dir)
 {
        struct drm_gem_object *obj = buffer->priv;
        struct page **pages;
@@ -94,7 +94,7 @@ static int omap_gem_dmabuf_begin_cpu_access(struct dma_buf *buffer,
 }
 
 static void omap_gem_dmabuf_end_cpu_access(struct dma_buf *buffer,
-               size_t start, size_t len, enum dma_data_direction dir)
+               enum dma_data_direction dir)
 {
        struct drm_gem_object *obj = buffer->priv;
        omap_gem_put_pages(obj);
index 200419d4d43cf7fd475729b0fc10e97478359a76..c427499133d6e3ffbc7f777ada41ee8ec1c42139 100644 (file)
@@ -409,7 +409,6 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
 
        if (ufb->obj->base.import_attach) {
                ret = dma_buf_begin_cpu_access(ufb->obj->base.import_attach->dmabuf,
-                                              0, ufb->obj->base.size,
                                               DMA_FROM_DEVICE);
                if (ret)
                        goto unlock;
@@ -425,7 +424,6 @@ static int udl_user_framebuffer_dirty(struct drm_framebuffer *fb,
 
        if (ufb->obj->base.import_attach) {
                dma_buf_end_cpu_access(ufb->obj->base.import_attach->dmabuf,
-                                      0, ufb->obj->base.size,
                                       DMA_FROM_DEVICE);
        }
 
index e237e9f3312d6b99e5d2eac4a07f534ce03690c2..0754a37c967495fdafdf5bee4bd67d505c347dda 100644 (file)
@@ -1057,8 +1057,7 @@ static void ion_dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long offset,
 {
 }
 
-static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
-                                       size_t len,
+static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf,
                                        enum dma_data_direction direction)
 {
        struct ion_buffer *buffer = dmabuf->priv;
@@ -1076,8 +1075,7 @@ static int ion_dma_buf_begin_cpu_access(struct dma_buf *dmabuf, size_t start,
        return PTR_ERR_OR_ZERO(vaddr);
 }
 
-static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf, size_t start,
-                                      size_t len,
+static void ion_dma_buf_end_cpu_access(struct dma_buf *dmabuf,
                                       enum dma_data_direction direction)
 {
        struct ion_buffer *buffer = dmabuf->priv;
index b8dcf5a26cc4ac1250bda0ded24d1ceff3c8cf24..da34bc12cd7cc5f9db3ea74d8e41eccf139873d3 100644 (file)
@@ -109,7 +109,7 @@ static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr,
        if (offset > dma_buf->size || size > dma_buf->size - offset)
                return -EINVAL;
 
-       ret = dma_buf_begin_cpu_access(dma_buf, offset, size, dir);
+       ret = dma_buf_begin_cpu_access(dma_buf, dir);
        if (ret)
                return ret;
 
@@ -139,7 +139,7 @@ static int ion_handle_test_kernel(struct dma_buf *dma_buf, void __user *ptr,
                copy_offset = 0;
        }
 err:
-       dma_buf_end_cpu_access(dma_buf, offset, size, dir);
+       dma_buf_end_cpu_access(dma_buf, dir);
        return ret;
 }
 
index f98bd7068d55a3f4ddfdb50dc01d95909ab30f45..532108ea0c1c046dbc264af046137efc3a0d7b81 100644 (file)
@@ -54,7 +54,7 @@ struct dma_buf_attachment;
  * @release: release this buffer; to be called after the last dma_buf_put.
  * @begin_cpu_access: [optional] called before cpu access to invalidate cpu
  *                   caches and allocate backing storage (if not yet done)
- *                   respectively pin the objet into memory.
+ *                   respectively pin the object into memory.
  * @end_cpu_access: [optional] called after cpu access to flush caches.
  * @kmap_atomic: maps a page from the buffer into kernel address
  *              space, users may not block until the subsequent unmap call.
@@ -93,10 +93,8 @@ struct dma_buf_ops {
        /* after final dma_buf_put() */
        void (*release)(struct dma_buf *);
 
-       int (*begin_cpu_access)(struct dma_buf *, size_t, size_t,
-                               enum dma_data_direction);
-       void (*end_cpu_access)(struct dma_buf *, size_t, size_t,
-                              enum dma_data_direction);
+       int (*begin_cpu_access)(struct dma_buf *, enum dma_data_direction);
+       void (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
        void *(*kmap_atomic)(struct dma_buf *, unsigned long);
        void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
        void *(*kmap)(struct dma_buf *, unsigned long);
@@ -224,9 +222,9 @@ struct sg_table *dma_buf_map_attachment(struct dma_buf_attachment *,
                                        enum dma_data_direction);
 void dma_buf_unmap_attachment(struct dma_buf_attachment *, struct sg_table *,
                                enum dma_data_direction);
-int dma_buf_begin_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
+int dma_buf_begin_cpu_access(struct dma_buf *dma_buf,
                             enum dma_data_direction dir);
-void dma_buf_end_cpu_access(struct dma_buf *dma_buf, size_t start, size_t len,
+void dma_buf_end_cpu_access(struct dma_buf *dma_buf,
                            enum dma_data_direction dir);
 void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
 void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);