[media] vb2-dma-sg: move dma_(un)map_sg here
drivers/media/v4l2-core/videobuf2-dma-sg.c
/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)                                     \
        do {                                                            \
                if (debug >= level)                                     \
                        printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);  \
        } while (0)

struct vb2_dma_sg_conf {
        struct device *dev;
};

struct vb2_dma_sg_buf {
        struct device                   *dev;
        void                            *vaddr;
        struct page                     **pages;
        int                             offset;
        enum dma_data_direction         dma_dir;
        struct sg_table                 sg_table;
        size_t                          size;
        unsigned int                    num_pages;
        atomic_t                        refcount;
        struct vb2_vmarea_handler       handler;
        struct vm_area_struct           *vma;
};

static void vb2_dma_sg_put(void *buf_priv);

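/*
 * Allocate buf->num_pages pages for an MMAP buffer. To keep the resulting
 * scatterlist short, try the highest page order that still fits the
 * remaining size, fall back to smaller orders on allocation failure, and
 * use split_page() so that every entry in buf->pages ends up as an
 * order-0 page that can be freed individually.
 */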
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
                gfp_t gfp_flags)
{
        unsigned int last_page = 0;
        int size = buf->size;

        while (size > 0) {
                struct page *pages;
                int order;
                int i;

                order = get_order(size);
                /* Don't over-allocate */
                if ((PAGE_SIZE << order) > size)
                        order--;

                pages = NULL;
                while (!pages) {
                        pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
                                        __GFP_NOWARN | gfp_flags, order);
                        if (pages)
                                break;

                        if (order == 0) {
                                while (last_page--)
                                        __free_page(buf->pages[last_page]);
                                return -ENOMEM;
                        }
                        order--;
                }

                split_page(pages, order);
                for (i = 0; i < (1 << order); i++)
                        buf->pages[last_page++] = &pages[i];

                size -= PAGE_SIZE << order;
        }

        return 0;
}

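/*
 * MMAP allocation: allocate the pages, build an sg_table from them, take a
 * reference on the allocation context's device and map the table for DMA.
 * The mapping is done once here (and undone in vb2_dma_sg_put()); the
 * prepare() and finish() ops below only synchronize the CPU caches.
 */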
static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
                              enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
        struct vb2_dma_sg_conf *conf = alloc_ctx;
        struct vb2_dma_sg_buf *buf;
        struct sg_table *sgt;
        int ret;
        int num_pages;

        if (WARN_ON(alloc_ctx == NULL))
                return NULL;
        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return NULL;

        buf->vaddr = NULL;
        buf->dma_dir = dma_dir;
        buf->offset = 0;
        buf->size = size;
        /* size is already page aligned */
        buf->num_pages = size >> PAGE_SHIFT;

        buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
                             GFP_KERNEL);
        if (!buf->pages)
                goto fail_pages_array_alloc;

        ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
        if (ret)
                goto fail_pages_alloc;

        ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
                        buf->num_pages, 0, size, GFP_KERNEL);
        if (ret)
                goto fail_table_alloc;

        /* Prevent the device from being released while the buffer is used */
        buf->dev = get_device(conf->dev);

        sgt = &buf->sg_table;
        if (dma_map_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir) == 0)
                goto fail_map;
        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);

        buf->handler.refcount = &buf->refcount;
        buf->handler.put = vb2_dma_sg_put;
        buf->handler.arg = buf;

        atomic_inc(&buf->refcount);

        dprintk(1, "%s: Allocated buffer of %d pages\n",
                __func__, buf->num_pages);
        return buf;

fail_map:
        put_device(buf->dev);
        sg_free_table(sgt);
fail_table_alloc:
        num_pages = buf->num_pages;
        while (num_pages--)
                __free_page(buf->pages[num_pages]);
fail_pages_alloc:
        kfree(buf->pages);
fail_pages_array_alloc:
        kfree(buf);
        return NULL;
}

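/*
 * Release an MMAP buffer once the last reference is dropped: unmap it from
 * the device, drop any kernel mapping, free the sg_table and the pages, and
 * put the device reference taken at allocation time.
 */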
static void vb2_dma_sg_put(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        struct sg_table *sgt = &buf->sg_table;
        int i = buf->num_pages;

        if (atomic_dec_and_test(&buf->refcount)) {
                dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
                        buf->num_pages);
                dma_unmap_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
                if (buf->vaddr)
                        vm_unmap_ram(buf->vaddr, buf->num_pages);
                sg_free_table(&buf->sg_table);
                while (--i >= 0)
                        __free_page(buf->pages[i]);
                kfree(buf->pages);
                put_device(buf->dev);
                kfree(buf);
        }
}

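/*
 * Cache maintenance hooks called by the vb2 core before the device accesses
 * the buffer (prepare) and after it is done (finish). The sg list stays
 * mapped for the lifetime of the buffer, so only dma_sync_sg_*() is needed.
 */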
static void vb2_dma_sg_prepare(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        struct sg_table *sgt = &buf->sg_table;

        dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static void vb2_dma_sg_finish(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        struct sg_table *sgt = &buf->sg_table;

        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
}

static inline int vma_is_io(struct vm_area_struct *vma)
{
        return !!(vma->vm_flags & (VM_IO | VM_PFNMAP));
}

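/*
 * USERPTR path: resolve the user pages backing [vaddr, vaddr + size). For
 * VM_IO/VM_PFNMAP areas the pages are looked up via follow_pfn(); otherwise
 * they are pinned with get_user_pages(). The resulting pages are wrapped in
 * an sg_table and mapped for DMA, just like in the MMAP case above.
 */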
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
                                    unsigned long size,
                                    enum dma_data_direction dma_dir)
{
        struct vb2_dma_sg_conf *conf = alloc_ctx;
        struct vb2_dma_sg_buf *buf;
        unsigned long first, last;
        int num_pages_from_user;
        struct vm_area_struct *vma;
        struct sg_table *sgt;

        buf = kzalloc(sizeof *buf, GFP_KERNEL);
        if (!buf)
                return NULL;

        buf->vaddr = NULL;
        buf->dev = conf->dev;
        buf->dma_dir = dma_dir;
        buf->offset = vaddr & ~PAGE_MASK;
        buf->size = size;

        first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
        last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
        buf->num_pages = last - first + 1;

        buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
                             GFP_KERNEL);
        if (!buf->pages)
                goto userptr_fail_alloc_pages;

        vma = find_vma(current->mm, vaddr);
        if (!vma) {
                dprintk(1, "no vma for address %lu\n", vaddr);
                goto userptr_fail_find_vma;
        }

        if (vma->vm_end < vaddr + size) {
                dprintk(1, "vma at %lu is too small for %lu bytes\n",
                        vaddr, size);
                goto userptr_fail_find_vma;
        }

        buf->vma = vb2_get_vma(vma);
        if (!buf->vma) {
                dprintk(1, "failed to copy vma\n");
                goto userptr_fail_find_vma;
        }

        if (vma_is_io(buf->vma)) {
                for (num_pages_from_user = 0;
                     num_pages_from_user < buf->num_pages;
                     ++num_pages_from_user, vaddr += PAGE_SIZE) {
                        unsigned long pfn;

                        if (follow_pfn(vma, vaddr, &pfn)) {
                                dprintk(1, "no page for address %lu\n", vaddr);
                                break;
                        }
                        buf->pages[num_pages_from_user] = pfn_to_page(pfn);
                }
        } else
                num_pages_from_user = get_user_pages(current, current->mm,
                                                vaddr & PAGE_MASK,
                                                buf->num_pages,
                                                buf->dma_dir == DMA_FROM_DEVICE,
                                                1, /* force */
                                                buf->pages,
                                                NULL);

        if (num_pages_from_user != buf->num_pages)
                goto userptr_fail_get_user_pages;

        if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
                        buf->num_pages, buf->offset, size, 0))
                goto userptr_fail_alloc_table_from_pages;

        sgt = &buf->sg_table;
        if (dma_map_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir) == 0)
                goto userptr_fail_map;
        dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
        return buf;

userptr_fail_map:
        sg_free_table(&buf->sg_table);
userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
        dprintk(1, "get_user_pages requested/got: %d/%d\n",
                buf->num_pages, num_pages_from_user);
        if (!vma_is_io(buf->vma))
                while (--num_pages_from_user >= 0)
                        put_page(buf->pages[num_pages_from_user]);
        vb2_put_vma(buf->vma);
userptr_fail_find_vma:
        kfree(buf->pages);
userptr_fail_alloc_pages:
        kfree(buf);
        return NULL;
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 * be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        struct sg_table *sgt = &buf->sg_table;
        int i = buf->num_pages;

        dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
                __func__, buf->num_pages);
        dma_unmap_sg(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir);
        if (buf->vaddr)
                vm_unmap_ram(buf->vaddr, buf->num_pages);
        sg_free_table(&buf->sg_table);
        while (--i >= 0) {
                if (buf->dma_dir == DMA_FROM_DEVICE)
                        set_page_dirty_lock(buf->pages[i]);
                if (!vma_is_io(buf->vma))
                        put_page(buf->pages[i]);
        }
        kfree(buf->pages);
        vb2_put_vma(buf->vma);
        kfree(buf);
}

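/*
 * Map the page array into the kernel with vm_map_ram() on first use and
 * return a virtual address for the start of the payload.
 */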
static void *vb2_dma_sg_vaddr(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;

        BUG_ON(!buf);

        if (!buf->vaddr)
                buf->vaddr = vm_map_ram(buf->pages,
                                        buf->num_pages,
                                        -1,
                                        PAGE_KERNEL);

        /* add offset in case userptr is not page-aligned */
        return buf->vaddr + buf->offset;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;

        return atomic_read(&buf->refcount);
}

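/*
 * mmap() a buffer into userspace. The pages are not physically contiguous,
 * so each page is inserted into the VMA individually with vm_insert_page();
 * the common vb2 vm_ops then track the mapping's refcount.
 */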
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
        struct vb2_dma_sg_buf *buf = buf_priv;
        unsigned long uaddr = vma->vm_start;
        unsigned long usize = vma->vm_end - vma->vm_start;
        int i = 0;

        if (!buf) {
                printk(KERN_ERR "No memory to map\n");
                return -EINVAL;
        }

        do {
                int ret;

                ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
                if (ret) {
                        printk(KERN_ERR "Remapping memory, error: %d\n", ret);
                        return ret;
                }

                uaddr += PAGE_SIZE;
                usize -= PAGE_SIZE;
        } while (usize > 0);

        /*
         * Use common vm_area operations to track buffer refcount.
         */
        vma->vm_private_data = &buf->handler;
        vma->vm_ops = &vb2_common_vm_ops;

        vma->vm_ops->open(vma);

        return 0;
}

static void *vb2_dma_sg_cookie(void *buf_priv)
{
        struct vb2_dma_sg_buf *buf = buf_priv;

        return &buf->sg_table;
}

const struct vb2_mem_ops vb2_dma_sg_memops = {
        .alloc          = vb2_dma_sg_alloc,
        .put            = vb2_dma_sg_put,
        .get_userptr    = vb2_dma_sg_get_userptr,
        .put_userptr    = vb2_dma_sg_put_userptr,
        .prepare        = vb2_dma_sg_prepare,
        .finish         = vb2_dma_sg_finish,
        .vaddr          = vb2_dma_sg_vaddr,
        .mmap           = vb2_dma_sg_mmap,
        .num_users      = vb2_dma_sg_num_users,
        .cookie         = vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

void *vb2_dma_sg_init_ctx(struct device *dev)
{
        struct vb2_dma_sg_conf *conf;

        conf = kzalloc(sizeof(*conf), GFP_KERNEL);
        if (!conf)
                return ERR_PTR(-ENOMEM);

        conf->dev = dev;

        return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_sg_init_ctx);

void vb2_dma_sg_cleanup_ctx(void *alloc_ctx)
{
        if (!IS_ERR_OR_NULL(alloc_ctx))
                kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_sg_cleanup_ctx);

MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");
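
/*
 * Usage sketch (illustrative only, not part of this file): a driver that
 * wants its vb2 buffers backed by this allocator typically creates one
 * allocation context per DMA-capable device and points its vb2_queue at
 * vb2_dma_sg_memops. The names "foo_dev" and "queue" below are hypothetical;
 * consult the vb2 core documentation for the exact callback signatures in
 * your kernel version.
 *
 *      // probe():
 *      foo_dev->alloc_ctx = vb2_dma_sg_init_ctx(&pdev->dev);
 *      if (IS_ERR(foo_dev->alloc_ctx))
 *              return PTR_ERR(foo_dev->alloc_ctx);
 *
 *      foo_dev->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_READ;
 *      foo_dev->queue.mem_ops = &vb2_dma_sg_memops;
 *
 *      // queue_setup() callback: hand the context to the core
 *      alloc_ctxs[0] = foo_dev->alloc_ctx;
 *
 *      // remove():
 *      vb2_dma_sg_cleanup_ctx(foo_dev->alloc_ctx);
 *
 * The driver can then obtain a buffer's sg_table through the .cookie op
 * (see vb2_dma_sg_plane_desc() in videobuf2-dma-sg.h) and program its
 * scatter/gather DMA engine from it.
 */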