/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>

struct vb2_vmalloc_buf {
        void                            *vaddr;
        struct frame_vector             *vec;
        enum dma_data_direction         dma_dir;
        unsigned long                   size;
        refcount_t                      refcount;
        struct vb2_vmarea_handler       handler;
        struct dma_buf                  *dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

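/*
 * MMAP path: allocate a refcounted, user-mappable buffer with vmalloc_user()
 * and wire it to the common vmarea handler so that unmapping the last
 * userspace mapping drops the buffer reference.
 */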
static void *vb2_vmalloc_alloc(struct vb2_buffer *vb, struct device *dev,
                               unsigned long size)
{
        struct vb2_vmalloc_buf *buf;

        buf = kzalloc(sizeof(*buf), GFP_KERNEL | vb->vb2_queue->gfp_flags);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->size = size;
        buf->vaddr = vmalloc_user(buf->size);
        if (!buf->vaddr) {
                pr_debug("vmalloc of size %ld failed\n", buf->size);
                kfree(buf);
                return ERR_PTR(-ENOMEM);
        }

        buf->dma_dir = vb->vb2_queue->dma_dir;
        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_vmalloc_put;
        buf->handler.arg = buf;

        refcount_set(&buf->refcount, 1);
        return buf;
}

static void vb2_vmalloc_put(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;

        if (refcount_dec_and_test(&buf->refcount)) {
                vfree(buf->vaddr);
                kfree(buf);
        }
}

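/*
 * USERPTR path: pin the userspace pages and make them kernel-addressable,
 * either with vm_map_ram() when struct pages are available, or by
 * ioremap()ing the range when only physically contiguous PFNs exist.
 */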
static void *vb2_vmalloc_get_userptr(struct vb2_buffer *vb, struct device *dev,
                                     unsigned long vaddr, unsigned long size)
{
        struct vb2_vmalloc_buf *buf;
        struct frame_vector *vec;
        int n_pages, offset, i;
        int ret = -ENOMEM;

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dma_dir = vb->vb2_queue->dma_dir;
        offset = vaddr & ~PAGE_MASK;
        buf->size = size;
        vec = vb2_create_framevec(vaddr, size);
        if (IS_ERR(vec)) {
                ret = PTR_ERR(vec);
                goto fail_pfnvec_create;
        }
        buf->vec = vec;
        n_pages = frame_vector_count(vec);
        if (frame_vector_to_pages(vec) < 0) {
                unsigned long *nums = frame_vector_pfns(vec);

                /*
                 * We cannot get page pointers for these pfns. Check memory is
                 * physically contiguous and use direct mapping.
                 */
                for (i = 1; i < n_pages; i++)
                        if (nums[i-1] + 1 != nums[i])
                                goto fail_map;
                buf->vaddr = (__force void *)
                        ioremap(__pfn_to_phys(nums[0]), size + offset);
        } else {
                buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1);
        }

        if (!buf->vaddr)
                goto fail_map;
        buf->vaddr += offset;
        return buf;

fail_map:
        vb2_destroy_framevec(vec);
fail_pfnvec_create:
        kfree(buf);
        return ERR_PTR(ret);
}

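/*
 * Undo vb2_vmalloc_get_userptr(): drop the kernel mapping, mark the pages
 * dirty for capture directions, and release the pinned frame vector.
 */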
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
        unsigned int i;
        struct page **pages;
        unsigned int n_pages;

        if (!buf->vec->is_pfns) {
                n_pages = frame_vector_count(buf->vec);
                pages = frame_vector_pages(buf->vec);
                if (vaddr)
                        vm_unmap_ram((void *)vaddr, n_pages);
                if (buf->dma_dir == DMA_FROM_DEVICE ||
                    buf->dma_dir == DMA_BIDIRECTIONAL)
                        for (i = 0; i < n_pages; i++)
                                set_page_dirty_lock(pages[i]);
        } else {
                iounmap((__force void __iomem *)buf->vaddr);
        }
        vb2_destroy_framevec(buf->vec);
        kfree(buf);
}

static void *vb2_vmalloc_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;

        if (!buf->vaddr) {
                pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
                return NULL;
        }
        return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        return refcount_read(&buf->refcount);
}

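/*
 * mmap() a vmalloc'ed buffer into userspace; the common vm_operations keep
 * the buffer alive for as long as a mapping exists.
 */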
static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        int ret;

        if (!buf) {
                pr_err("No memory to map\n");
                return -EINVAL;
        }

        ret = remap_vmalloc_range(vma, buf->vaddr, 0);
        if (ret) {
                pr_err("Remapping vmalloc memory, error: %d\n", ret);
                return ret;
        }

        /* Make sure that vm_areas for 2 buffers won't be merged together */
        vma->vm_flags |= VM_DONTEXPAND;

        /* Use common vm_area operations to track buffer refcount. */
        vma->vm_private_data = &buf->handler;
        vma->vm_ops = &vb2_common_vm_ops;
        vma->vm_ops->open(vma);

        return 0;
}

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_vmalloc_attachment {
        struct sg_table sgt;
        enum dma_data_direction dma_dir;
};

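/*
 * Attaching builds a scatterlist covering all pages backing the vmalloc'ed
 * buffer; the table is cached in the attachment and only mapped to the
 * importer's device when map_dma_buf is called.
 */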
static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
        struct dma_buf_attachment *dbuf_attach)
{
        struct vb2_vmalloc_attachment *attach;
        struct vb2_vmalloc_buf *buf = dbuf->priv;
        int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
        struct sg_table *sgt;
        struct scatterlist *sg;
        void *vaddr = buf->vaddr;
        int ret;
        int i;

        attach = kzalloc(sizeof(*attach), GFP_KERNEL);
        if (!attach)
                return -ENOMEM;

        sgt = &attach->sgt;
        ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
        if (ret) {
                kfree(attach);
                return ret;
        }
        for_each_sgtable_sg(sgt, sg, i) {
                struct page *page = vmalloc_to_page(vaddr);

                if (!page) {
                        sg_free_table(sgt);
                        kfree(attach);
                        return -ENOMEM;
                }
                sg_set_page(sg, page, PAGE_SIZE, 0);
                vaddr += PAGE_SIZE;
        }

        attach->dma_dir = DMA_NONE;
        dbuf_attach->priv = attach;
        return 0;
}

static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
        struct dma_buf_attachment *db_attach)
{
        struct vb2_vmalloc_attachment *attach = db_attach->priv;
        struct sg_table *sgt;

        if (!attach)
                return;

        sgt = &attach->sgt;

        /* release the scatterlist cache */
        if (attach->dma_dir != DMA_NONE)
                dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
        sg_free_table(sgt);
        kfree(attach);
        db_attach->priv = NULL;
}

static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
        struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
        struct vb2_vmalloc_attachment *attach = db_attach->priv;
        /* stealing dmabuf mutex to serialize map/unmap operations */
        struct mutex *lock = &db_attach->dmabuf->lock;
        struct sg_table *sgt;

        mutex_lock(lock);

        sgt = &attach->sgt;
        /* return previously mapped sg table */
        if (attach->dma_dir == dma_dir) {
                mutex_unlock(lock);
                return sgt;
        }

        /* release any previous cache */
        if (attach->dma_dir != DMA_NONE) {
                dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
                attach->dma_dir = DMA_NONE;
        }

        /* mapping to the client with new direction */
        if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
                pr_err("failed to map scatterlist\n");
                mutex_unlock(lock);
                return ERR_PTR(-EIO);
        }

        attach->dma_dir = dma_dir;

        mutex_unlock(lock);

        return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
        struct sg_table *sgt, enum dma_data_direction dma_dir)
{
        /* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
        /* drop reference obtained in vb2_vmalloc_get_dmabuf */
        vb2_vmalloc_put(dbuf->priv);
}

static int vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf,
                                       struct iosys_map *map)
{
        struct vb2_vmalloc_buf *buf = dbuf->priv;

        iosys_map_set_vaddr(map, buf->vaddr);
        return 0;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
                                       struct vm_area_struct *vma)
{
        return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
        .attach = vb2_vmalloc_dmabuf_ops_attach,
        .detach = vb2_vmalloc_dmabuf_ops_detach,
        .map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
        .unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
        .vmap = vb2_vmalloc_dmabuf_ops_vmap,
        .mmap = vb2_vmalloc_dmabuf_ops_mmap,
        .release = vb2_vmalloc_dmabuf_ops_release,
};

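/*
 * Export the buffer as a dma-buf. The exported dma-buf takes its own
 * reference on the vb2 buffer, dropped again in the dma-buf release op.
 */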
static struct dma_buf *vb2_vmalloc_get_dmabuf(struct vb2_buffer *vb,
                                              void *buf_priv,
                                              unsigned long flags)
{
        struct vb2_vmalloc_buf *buf = buf_priv;
        struct dma_buf *dbuf;
        DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

        exp_info.ops = &vb2_vmalloc_dmabuf_ops;
        exp_info.size = buf->size;
        exp_info.flags = flags;
        exp_info.priv = buf;

        if (WARN_ON(!buf->vaddr))
                return NULL;

        dbuf = dma_buf_export(&exp_info);
        if (IS_ERR(dbuf))
                return NULL;

        /* dmabuf keeps reference to vb2 buffer */
        refcount_inc(&buf->refcount);

        return dbuf;
}
#endif /* CONFIG_HAS_DMA */

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

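/*
 * When vb2 imports a dma-buf, the buffer is made CPU-addressable through
 * dma_buf_vmap()/dma_buf_vunmap(); no device mapping is set up here.
 */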
static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;
        struct iosys_map map;
        int ret;

        ret = dma_buf_vmap(buf->dbuf, &map);
        if (ret)
                return -EFAULT;
        buf->vaddr = map.vaddr;
        return 0;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;
        struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

        dma_buf_vunmap(buf->dbuf, &map);
        buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
        struct vb2_vmalloc_buf *buf = mem_priv;
        struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

        if (buf->vaddr)
                dma_buf_vunmap(buf->dbuf, &map);
        kfree(buf);
}

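/*
 * Importing only records the dma-buf and the requested size; the actual
 * kernel mapping is deferred until vb2_vmalloc_map_dmabuf().
 */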
static void *vb2_vmalloc_attach_dmabuf(struct vb2_buffer *vb,
                                       struct device *dev,
                                       struct dma_buf *dbuf,
                                       unsigned long size)
{
        struct vb2_vmalloc_buf *buf;

        if (dbuf->size < size)
                return ERR_PTR(-EFAULT);

        buf = kzalloc(sizeof(*buf), GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        buf->dbuf = dbuf;
        buf->dma_dir = vb->vb2_queue->dma_dir;
        buf->size = size;
        return buf;
}

const struct vb2_mem_ops vb2_vmalloc_memops = {
        .alloc = vb2_vmalloc_alloc,
        .put = vb2_vmalloc_put,
        .get_userptr = vb2_vmalloc_get_userptr,
        .put_userptr = vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
        .get_dmabuf = vb2_vmalloc_get_dmabuf,
#endif
        .map_dmabuf = vb2_vmalloc_map_dmabuf,
        .unmap_dmabuf = vb2_vmalloc_unmap_dmabuf,
        .attach_dmabuf = vb2_vmalloc_attach_dmabuf,
        .detach_dmabuf = vb2_vmalloc_detach_dmabuf,
        .vaddr = vb2_vmalloc_vaddr,
        .mmap = vb2_vmalloc_mmap,
        .num_users = vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(DMA_BUF);