udmabuf: add vmap and vunmap methods to udmabuf_ops
Author:     Lukasz Wiecaszek <lukasz.wiecaszek@googlemail.com>
AuthorDate: Thu, 17 Nov 2022 17:18:09 +0000 (18:18 +0100)
Commit:     Christian König <christian.koenig@amd.com>
CommitDate: Fri, 18 Nov 2022 09:57:26 +0000 (10:57 +0100)
The reason behind this patch is associated with the videobuf2
subsystem (or, more generally, with the v4l2 framework) and
user-created dma buffers (udmabuf). In some circumstances, when
dealing with V4L2_MEMORY_DMABUF buffers, the videobuf2 subsystem
wants to call dma_buf_vmap() on the attached dma buffer. As udmabuf
does not implement the .vmap operation, such a dma_buf_vmap() call
naturally fails:

videobuf2_common: __vb2_queue_alloc: allocated 3 buffers, 1 plane(s) each
videobuf2_common: __prepare_dmabuf: buffer for plane 0 changed
videobuf2_common: __prepare_dmabuf: failed to map dmabuf for plane 0
videobuf2_common: __buf_prepare: buffer preparation failed: -14
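
For context, this is roughly how such a user-created dma buffer comes
to exist in the first place. A minimal userspace sketch, illustrative
only and not part of this patch (the buffer name, sizes and error
handling are placeholders): udmabuf turns a sealed memfd into a
dma-buf fd that can then be queued to V4L2 as a V4L2_MEMORY_DMABUF
buffer.

#define _GNU_SOURCE
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include <linux/udmabuf.h>

static int create_udmabuf(size_t size)
{
	struct udmabuf_create create = { 0 };
	int memfd, devfd, buffd;

	/* The backing memory is a memfd, which udmabuf requires
	 * to be sealed against shrinking. */
	memfd = memfd_create("vb2-frame", MFD_ALLOW_SEALING);
	if (memfd < 0)
		return -1;
	if (ftruncate(memfd, size) < 0 ||
	    fcntl(memfd, F_ADD_SEALS, F_SEAL_SHRINK) < 0) {
		close(memfd);
		return -1;
	}

	devfd = open("/dev/udmabuf", O_RDWR);
	if (devfd < 0) {
		close(memfd);
		return -1;
	}

	create.memfd  = memfd;
	create.offset = 0;	/* must be page-aligned */
	create.size   = size;	/* must be a multiple of the page size */
	buffd = ioctl(devfd, UDMABUF_CREATE, &create); /* dma-buf fd */

	close(devfd);
	close(memfd);
	return buffd;	/* hand to V4L2 as a V4L2_MEMORY_DMABUF buffer */
}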

The patch itself is straightforward. It adds implementations of the
.vmap and .vunmap methods to 'struct dma_buf_ops udmabuf_ops'.
The .vmap method uses vm_map_ram() to map the buffer's pages linearly
into the kernel virtual address space; .vunmap removes the mapping
created earlier by .vmap. All locking and vmap reference counting is
done in dma-buf.c, so duplicating it in .vmap/.vunmap would be
redundant.
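
With these ops wired up, an importer-side dma_buf_vmap() call on a
udmabuf succeeds. A minimal kernel-side sketch of such a caller
(example_peek_dmabuf() is a hypothetical helper, not part of the
patch); note that, per the dma_resv_assert_held() calls in the diff
below, the caller is expected to hold the buffer's reservation lock:

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/iosys-map.h>
#include <linux/types.h>

/* Hypothetical importer helper: map a dma-buf into the kernel,
 * read one byte through the mapping, and unmap it again. */
static int example_peek_dmabuf(struct dma_buf *dmabuf, u8 *out)
{
	struct iosys_map map;
	int ret;

	ret = dma_resv_lock(dmabuf->resv, NULL);
	if (ret)
		return ret;

	ret = dma_buf_vmap(dmabuf, &map);	/* ends up in vmap_udmabuf() */
	if (!ret) {
		*out = *(u8 *)map.vaddr;	/* CPU access via the mapping */
		dma_buf_vunmap(dmabuf, &map);	/* ends up in vunmap_udmabuf() */
	}

	dma_resv_unlock(dmabuf->resv);
	return ret;
}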

Signed-off-by: Lukasz Wiecaszek <lukasz.wiecaszek@gmail.com>
Reviewed-by: Dmitry Osipenko <dmitry.osipenko@collabora.com>
Acked-by: Christian König <christian.koenig@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20221117171810.75637-1-lukasz.wiecaszek@gmail.com
Signed-off-by: Christian König <christian.koenig@amd.com>
diff --git a/drivers/dma-buf/udmabuf.c b/drivers/dma-buf/udmabuf.c
index 283816fbd72fc39b34b17a559950a4c94af08d90..740d6e426ee952839407b9738c1d94813a9b79f0 100644
--- a/drivers/dma-buf/udmabuf.c
+++ b/drivers/dma-buf/udmabuf.c
@@ -13,6 +13,8 @@
 #include <linux/slab.h>
 #include <linux/udmabuf.h>
 #include <linux/hugetlb.h>
+#include <linux/vmalloc.h>
+#include <linux/iosys-map.h>
 
 static int list_limit = 1024;
 module_param(list_limit, int, 0644);
@@ -60,6 +62,30 @@ static int mmap_udmabuf(struct dma_buf *buf, struct vm_area_struct *vma)
        return 0;
 }
 
+static int vmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
+{
+       struct udmabuf *ubuf = buf->priv;
+       void *vaddr;
+
+       dma_resv_assert_held(buf->resv);
+
+       vaddr = vm_map_ram(ubuf->pages, ubuf->pagecount, -1);
+       if (!vaddr)
+               return -EINVAL;
+
+       iosys_map_set_vaddr(map, vaddr);
+       return 0;
+}
+
+static void vunmap_udmabuf(struct dma_buf *buf, struct iosys_map *map)
+{
+       struct udmabuf *ubuf = buf->priv;
+
+       dma_resv_assert_held(buf->resv);
+
+       vm_unmap_ram(map->vaddr, ubuf->pagecount);
+}
+
 static struct sg_table *get_sg_table(struct device *dev, struct dma_buf *buf,
                                     enum dma_data_direction direction)
 {
@@ -162,6 +188,8 @@ static const struct dma_buf_ops udmabuf_ops = {
        .unmap_dma_buf     = unmap_udmabuf,
        .release           = release_udmabuf,
        .mmap              = mmap_udmabuf,
+       .vmap              = vmap_udmabuf,
+       .vunmap            = vunmap_udmabuf,
        .begin_cpu_access  = begin_cpu_udmabuf,
        .end_cpu_access    = end_cpu_udmabuf,
 };