/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-core.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);	\
	} while (0)

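/*
 * Per-buffer state kept by this allocator.  @pages holds one pointer per
 * page of the buffer and @sg_table describes the same pages as a
 * scatterlist.  @offset is the start of the data within the first page
 * (non-zero only for USERPTR buffers that are not page aligned), and
 * @vaddr is the kernel mapping created on demand by vb2_dma_sg_vaddr().
 */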
struct vb2_dma_sg_buf {
	void				*vaddr;
	struct page			**pages;
	int				write;
	int				offset;
	struct sg_table			sg_table;
	size_t				size;
	unsigned int			num_pages;
	atomic_t			refcount;
	struct vb2_vmarea_handler	handler;
};

static void vb2_dma_sg_put(void *buf_priv);

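/*
 * Allocate the pages backing an MMAP buffer.  The highest order that still
 * fits the remaining size is tried first and then split into individual
 * pages, falling back towards order 0 on failure, so that physically
 * contiguous runs can later be coalesced into fewer scatterlist entries.
 */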
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
		gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	int size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over-allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					__GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;

			if (order == 0) {
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];

		size -= PAGE_SIZE << order;
	}

	return 0;
}

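/*
 * The .alloc op for MMAP buffers: allocate the page array, fill it via
 * vb2_dma_sg_alloc_compacted() and build a scatterlist over it with
 * sg_alloc_table_from_pages().  The returned pointer is this allocator's
 * private handle, passed back to the other ops as buf_priv.
 */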
static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size, gfp_t gfp_flags)
{
	struct vb2_dma_sg_buf *buf;
	int ret;
	int num_pages;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->write = 0;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
			buf->num_pages, 0, size, gfp_flags);
	if (ret)
		goto fail_table_alloc;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	dprintk(1, "%s: Allocated buffer of %d pages\n",
		__func__, buf->num_pages);
	return buf;

fail_table_alloc:
	num_pages = buf->num_pages;
	while (num_pages--)
		__free_page(buf->pages[num_pages]);
fail_pages_alloc:
	kfree(buf->pages);
fail_pages_array_alloc:
	kfree(buf);
	return NULL;
}

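/*
 * The .put op: drop one reference and, once the last user (including any
 * mmap() mappings tracked through the vmarea handler) is gone, release the
 * kernel mapping, the scatterlist and the pages themselves.
 */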
static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->num_pages;

	if (atomic_dec_and_test(&buf->refcount)) {
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(&buf->sg_table);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
		kfree(buf);
	}
}

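/*
 * The .get_userptr op: pin the pages of a userspace buffer with
 * get_user_pages() and build a scatterlist over them, taking the sub-page
 * offset of a non-page-aligned user address into account.
 */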
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size, int write)
{
	struct vb2_dma_sg_buf *buf;
	unsigned long first, last;
	int num_pages_from_user;

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->write = write;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;

	first = (vaddr & PAGE_MASK) >> PAGE_SHIFT;
	last = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	buf->num_pages = last - first + 1;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages) {
		/* free the handle as well, so it is not leaked on failure */
		kfree(buf);
		return NULL;
	}

	num_pages_from_user = get_user_pages(current, current->mm,
					     vaddr & PAGE_MASK,
					     buf->num_pages,
					     write,
					     1, /* force */
					     buf->pages,
					     NULL);

	if (num_pages_from_user != buf->num_pages)
		goto userptr_fail_get_user_pages;

	if (sg_alloc_table_from_pages(&buf->sg_table, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_alloc_table_from_pages;

	return buf;

userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
	dprintk(1, "get_user_pages requested/got: %d/%d\n",
		num_pages_from_user, buf->num_pages);
	while (--num_pages_from_user >= 0)
		put_page(buf->pages[num_pages_from_user]);
	kfree(buf->pages);
	kfree(buf);
	return NULL;
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	int i = buf->num_pages;

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
		__func__, buf->num_pages);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(&buf->sg_table);
	while (--i >= 0) {
		if (buf->write)
			set_page_dirty_lock(buf->pages[i]);
		put_page(buf->pages[i]);
	}
	kfree(buf->pages);
	kfree(buf);
}

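/*
 * The .vaddr op: map the pages into the kernel with vm_map_ram() on first
 * use and return the address of the data, adjusted by the sub-page offset
 * of USERPTR buffers.
 */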
static void *vb2_dma_sg_vaddr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	BUG_ON(!buf);

	if (!buf->vaddr)
		buf->vaddr = vm_map_ram(buf->pages,
					buf->num_pages,
					-1,
					PAGE_KERNEL);

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr + buf->offset;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

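/*
 * The .mmap op: insert the buffer's pages into the vma one by one and hook
 * up the common vmarea handler so the mapping holds a reference on the
 * buffer for as long as it exists.
 */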
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int i = 0;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	do {
		int ret;

		ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
		if (ret) {
			printk(KERN_ERR "Remapping memory, error: %d\n", ret);
			return ret;
		}

		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

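/*
 * The .cookie op: hand back the sg_table describing the buffer; drivers
 * retrieve it through vb2_plane_cookie() and map it for DMA themselves.
 */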
static void *vb2_dma_sg_cookie(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return &buf->sg_table;
}

const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc		= vb2_dma_sg_alloc,
	.put		= vb2_dma_sg_put,
	.get_userptr	= vb2_dma_sg_get_userptr,
	.put_userptr	= vb2_dma_sg_put_userptr,
	.vaddr		= vb2_dma_sg_vaddr,
	.mmap		= vb2_dma_sg_mmap,
	.num_users	= vb2_dma_sg_num_users,
	.cookie		= vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

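/*
 * Usage sketch (hypothetical driver code, not part of this file): a driver
 * selects this allocator by pointing its vb2_queue at these ops before
 * calling vb2_queue_init(), e.g.
 *
 *	q->mem_ops = &vb2_dma_sg_memops;
 *	ret = vb2_queue_init(q);
 *
 * and can then fetch each plane's scatterlist with vb2_plane_cookie()
 * when programming its DMA engine.
 */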
MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");