[media] vb2-dma-sg: add allocation context to dma-sg
drivers/media/v4l2-core/videobuf2-dma-sg.c
/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...) \
	do { \
		if (debug >= level) \
			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg); \
	} while (0)

struct vb2_dma_sg_conf {
	struct device *dev;
};

struct vb2_dma_sg_buf {
	struct device *dev;
	void *vaddr;
	struct page **pages;
	int offset;
	enum dma_data_direction dma_dir;
	struct sg_table sg_table;
	size_t size;
	unsigned int num_pages;
	atomic_t refcount;
	struct vb2_vmarea_handler handler;
	struct vm_area_struct *vma;
};

static void vb2_dma_sg_put(void *buf_priv);

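/*
 * Try to back the buffer with physically contiguous chunks: start at the
 * highest page order that does not overshoot the remaining size and fall
 * back to smaller orders on allocation failure. Each chunk is split into
 * individual pages so that every entry of buf->pages can be freed
 * independently later.
 */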
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
		gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	int size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over-allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					__GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;

			if (order == 0) {
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];

		size -= PAGE_SIZE << order;
	}

	return 0;
}

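/*
 * MMAP allocation: build the page array with vb2_dma_sg_alloc_compacted(),
 * wrap it in an sg_table, and pin the allocation context's struct device
 * for the lifetime of the buffer.
 */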
static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
			      enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
	struct vb2_dma_sg_conf *conf = alloc_ctx;
	struct vb2_dma_sg_buf *buf;
	int ret;
	int num_pages;

	if (WARN_ON(alloc_ctx == NULL))
		return NULL;
	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->dma_dir = dma_dir;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
			buf->num_pages, 0, size, GFP_KERNEL);
	if (ret)
		goto fail_table_alloc;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(conf->dev);
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	dprintk(1, "%s: Allocated buffer of %d pages\n",
		__func__, buf->num_pages);
	return buf;

fail_table_alloc:
	num_pages = buf->num_pages;
	while (num_pages--)
		__free_page(buf->pages[num_pages]);
fail_pages_alloc:
	kfree(buf->pages);
fail_pages_array_alloc:
	kfree(buf);
	return NULL;
}

static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->num_pages;

	if (atomic_dec_and_test(&buf->refcount)) {
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(&buf->sg_table);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
		put_device(buf->dev);
		kfree(buf);
	}
}

static inline int vma_is_io(struct vm_area_struct *vma)
{
	return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

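/*
 * Pin a userspace buffer for DMA. For VM_IO/VM_PFNMAP mappings the pages
 * are looked up pfn by pfn with follow_pfn(); for ordinary mappings they
 * are pinned with get_user_pages(). Either way, an sg_table is then built
 * over the resulting page array.
 */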
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size,
				    enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_buf *buf;
	unsigned long first, last;
	int num_pages_from_user;
	struct vm_area_struct *vma;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->dma_dir = dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;

	first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	buf->num_pages = last - first + 1;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto userptr_fail_alloc_pages;

	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		dprintk(1, "no vma for address %lu\n", vaddr);
		goto userptr_fail_find_vma;
	}

	if (vma->vm_end < vaddr + size) {
		dprintk(1, "vma at %lu is too small for %lu bytes\n",
			vaddr, size);
		goto userptr_fail_find_vma;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		dprintk(1, "failed to copy vma\n");
		goto userptr_fail_find_vma;
	}

	if (vma_is_io(buf->vma)) {
		for (num_pages_from_user = 0;
		     num_pages_from_user < buf->num_pages;
		     ++num_pages_from_user, vaddr += PAGE_SIZE) {
			unsigned long pfn;

			if (follow_pfn(vma, vaddr, &pfn)) {
				dprintk(1, "no page for address %lu\n", vaddr);
				break;
			}
			buf->pages[num_pages_from_user] = pfn_to_page(pfn);
		}
	} else {
		num_pages_from_user = get_user_pages(current, current->mm,
					     vaddr & PAGE_MASK,
					     buf->num_pages,
					     buf->dma_dir == DMA_FROM_DEVICE,
					     1, /* force */
					     buf->pages,
					     NULL);
	}

	if (num_pages_from_user != buf->num_pages)
		goto userptr_fail_get_user_pages;

	if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
			buf->num_pages, buf->offset, size, GFP_KERNEL))
		goto userptr_fail_alloc_table_from_pages;

	return buf;

userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
	dprintk(1, "get_user_pages requested/got: %d/%d\n",
		buf->num_pages, num_pages_from_user);
	if (!vma_is_io(buf->vma))
		while (--num_pages_from_user >= 0)
			put_page(buf->pages[num_pages_from_user]);
	vb2_put_vma(buf->vma);
userptr_fail_find_vma:
	kfree(buf->pages);
userptr_fail_alloc_pages:
	kfree(buf);
	return NULL;
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 * be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->num_pages;

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
		__func__, buf->num_pages);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(&buf->sg_table);
	while (--i >= 0) {
		if (buf->dma_dir == DMA_FROM_DEVICE)
			set_page_dirty_lock(buf->pages[i]);
		if (!vma_is_io(buf->vma))
			put_page(buf->pages[i]);
	}
	kfree(buf->pages);
	vb2_put_vma(buf->vma);
	kfree(buf);
}

static void *vb2_dma_sg_vaddr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	BUG_ON(!buf);

	if (!buf->vaddr)
		buf->vaddr = vm_map_ram(buf->pages,
					buf->num_pages,
					-1,
					PAGE_KERNEL);

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr + buf->offset;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int i = 0;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	do {
		int ret;

		ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
		if (ret) {
			printk(KERN_ERR "Remapping memory, error: %d\n", ret);
			return ret;
		}

		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}
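
/*
 * The cookie exposed to drivers is the sg_table describing the buffer. This
 * allocator only pins and assembles the pages; mapping the table for DMA
 * (e.g. with dma_map_sg()) is left to the driver that retrieves the cookie.
 */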
static void *vb2_dma_sg_cookie(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return &buf->sg_table;
}

const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc = vb2_dma_sg_alloc,
	.put = vb2_dma_sg_put,
	.get_userptr = vb2_dma_sg_get_userptr,
	.put_userptr = vb2_dma_sg_put_userptr,
	.vaddr = vb2_dma_sg_vaddr,
	.mmap = vb2_dma_sg_mmap,
	.num_users = vb2_dma_sg_num_users,
	.cookie = vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

void *vb2_dma_sg_init_ctx(struct device *dev)
{
	struct vb2_dma_sg_conf *conf;

	conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_sg_init_ctx);

void vb2_dma_sg_cleanup_ctx(void *alloc_ctx)
{
	if (!IS_ERR_OR_NULL(alloc_ctx))
		kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_sg_cleanup_ctx);
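
/*
 * Typical driver usage (a minimal sketch; "my_dev", "queue" and "alloc_ctx"
 * are hypothetical driver fields, not part of this API):
 *
 *	my_dev->queue.mem_ops = &vb2_dma_sg_memops;
 *	my_dev->alloc_ctx = vb2_dma_sg_init_ctx(&pdev->dev);
 *	if (IS_ERR(my_dev->alloc_ctx))
 *		return PTR_ERR(my_dev->alloc_ctx);
 *
 * The context is then handed back to vb2 from the driver's queue_setup()
 * operation, and released on teardown:
 *
 *	vb2_dma_sg_cleanup_ctx(my_dev->alloc_ctx);
 */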

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");