/*
 * videobuf2-dma-sg.c - dma scatter/gather memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Andrzej Pietrasiewicz <andrzej.p@samsung.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-memops.h>
#include <media/videobuf2-dma-sg.h>

static int debug;
module_param(debug, int, 0644);

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vb2-dma-sg: " fmt, ## arg);	\
	} while (0)

struct vb2_dma_sg_conf {
	struct device		*dev;
};

struct vb2_dma_sg_buf {
	struct device			*dev;
	void				*vaddr;
	struct page			**pages;
	struct frame_vector		*vec;
	int				offset;
	enum dma_data_direction		dma_dir;
	struct sg_table			sg_table;
	/*
	 * This will point to sg_table when used with the MMAP or USERPTR
	 * memory model, and to the dma_buf sglist when used with the
	 * DMABUF memory model.
	 */
	struct sg_table			*dma_sgt;
	size_t				size;
	unsigned int			num_pages;
	atomic_t			refcount;
	struct vb2_vmarea_handler	handler;

	struct dma_buf_attachment	*db_attach;
};

static void vb2_dma_sg_put(void *buf_priv);

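/*
 * Allocate the buffer as a series of compound allocations: try the highest
 * page order that still fits the remaining size, and fall back to lower
 * orders (down to single pages) when allocation fails. split_page() then
 * lets the resulting pages be tracked, and later freed, individually.
 */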
static int vb2_dma_sg_alloc_compacted(struct vb2_dma_sg_buf *buf,
		gfp_t gfp_flags)
{
	unsigned int last_page = 0;
	int size = buf->size;

	while (size > 0) {
		struct page *pages;
		int order;
		int i;

		order = get_order(size);
		/* Don't over-allocate */
		if ((PAGE_SIZE << order) > size)
			order--;

		pages = NULL;
		while (!pages) {
			pages = alloc_pages(GFP_KERNEL | __GFP_ZERO |
					__GFP_NOWARN | gfp_flags, order);
			if (pages)
				break;

			if (order == 0) {
				while (last_page--)
					__free_page(buf->pages[last_page]);
				return -ENOMEM;
			}
			order--;
		}

		split_page(pages, order);
		for (i = 0; i < (1 << order); i++)
			buf->pages[last_page++] = &pages[i];

		size -= PAGE_SIZE << order;
	}

	return 0;
}

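/*
 * Allocate a MMAP buffer: gather the pages, build an sg_table covering them
 * and map it for DMA on the context's device. The CPU cache sync is skipped
 * here (DMA_ATTR_SKIP_CPU_SYNC); it happens in the prepare()/finish() memops.
 */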
static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
			      enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
	struct vb2_dma_sg_conf *conf = alloc_ctx;
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	int ret;
	int num_pages;
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

	if (WARN_ON(alloc_ctx == NULL))
		return NULL;
	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->dma_dir = dma_dir;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;
	buf->dma_sgt = &buf->sg_table;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, 0, size, GFP_KERNEL);
	if (ret)
		goto fail_table_alloc;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(conf->dev);

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, &attrs);
	if (!sgt->nents)
		goto fail_map;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	dprintk(1, "%s: Allocated buffer of %d pages\n",
		__func__, buf->num_pages);
	return buf;

fail_map:
	put_device(buf->dev);
	sg_free_table(buf->dma_sgt);
fail_table_alloc:
	num_pages = buf->num_pages;
	while (num_pages--)
		__free_page(buf->pages[num_pages]);
fail_pages_alloc:
	kfree(buf->pages);
fail_pages_array_alloc:
	kfree(buf);
	return NULL;
}

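/*
 * Drop one reference to a MMAP buffer; on the final put, unmap it from the
 * device, free the pages and release the device reference taken at
 * allocation time.
 */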
static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	if (atomic_dec_and_test(&buf->refcount)) {
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, &attrs);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(buf->dma_sgt);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
		put_device(buf->dev);
		kfree(buf);
	}
}

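/*
 * Cache synchronization for the MMAP and USERPTR models: prepare() hands
 * the buffer over to the device, finish() gives it back to the CPU. For
 * DMABUF buffers the exporter handles the sync, so both are no-ops there.
 */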
static void vb2_dma_sg_prepare(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (buf->db_attach)
		return;

	dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->orig_nents,
			       buf->dma_dir);
}

static void vb2_dma_sg_finish(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* DMABUF exporter will flush the cache for us */
	if (buf->db_attach)
		return;

	dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir);
}

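/*
 * Wrap a userspace memory range: pin the user pages with a frame vector,
 * build an sg_table over them and map it for DMA. The offset of vaddr
 * within its first page is recorded so that vaddr() can later return an
 * address that matches what userspace passed in.
 */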
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size,
				    enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_conf *conf = alloc_ctx;
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	DEFINE_DMA_ATTRS(attrs);
	struct frame_vector *vec;

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->dev = conf->dev;
	buf->dma_dir = dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	buf->dma_sgt = &buf->sg_table;
	vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE);
	if (IS_ERR(vec))
		goto userptr_fail_pfnvec;
	buf->vec = vec;

	buf->pages = frame_vector_pages(vec);
	if (IS_ERR(buf->pages))
		goto userptr_fail_sgtable;
	buf->num_pages = frame_vector_count(vec);

	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, buf->offset, size, GFP_KERNEL))
		goto userptr_fail_sgtable;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, &attrs);
	if (!sgt->nents)
		goto userptr_fail_map;

	return buf;

userptr_fail_map:
	sg_free_table(&buf->sg_table);
userptr_fail_sgtable:
	vb2_destroy_framevec(vec);
userptr_fail_pfnvec:
	kfree(buf);
	return NULL;
}

/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
		__func__, buf->num_pages);
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
			   &attrs);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(buf->dma_sgt);
	while (--i >= 0) {
		if (buf->dma_dir == DMA_FROM_DEVICE)
			set_page_dirty_lock(buf->pages[i]);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

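/*
 * Return a kernel virtual address for the buffer, creating the mapping on
 * first use: via dma_buf_vmap() for DMABUF buffers, via vm_map_ram() for
 * MMAP/USERPTR buffers.
 */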
static void *vb2_dma_sg_vaddr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	BUG_ON(!buf);

	if (!buf->vaddr) {
		if (buf->db_attach)
			buf->vaddr = dma_buf_vmap(buf->db_attach->dmabuf);
		else
			buf->vaddr = vm_map_ram(buf->pages,
					buf->num_pages, -1, PAGE_KERNEL);
	}

	/* add offset in case userptr is not page-aligned */
	return buf->vaddr ? buf->vaddr + buf->offset : NULL;
}

static unsigned int vb2_dma_sg_num_users(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return atomic_read(&buf->refcount);
}

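/*
 * Map the buffer into a userspace VMA one page at a time with
 * vm_insert_page(), then hook up the common vm_operations so the VMA
 * holds a reference on the buffer for as long as the mapping exists.
 */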
static int vb2_dma_sg_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	unsigned long uaddr = vma->vm_start;
	unsigned long usize = vma->vm_end - vma->vm_start;
	int i = 0;

	if (!buf) {
		printk(KERN_ERR "No memory to map\n");
		return -EINVAL;
	}

	do {
		int ret;

		ret = vm_insert_page(vma, uaddr, buf->pages[i++]);
		if (ret) {
			printk(KERN_ERR "Remapping memory, error: %d\n", ret);
			return ret;
		}

		uaddr += PAGE_SIZE;
		usize -= PAGE_SIZE;
	} while (usize > 0);

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dma_sg_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dma_sg_dmabuf_ops_attach(struct dma_buf *dbuf, struct device *dev,
	struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dma_sg_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dma_sg_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->dma_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->dma_sgt->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->dma_sgt->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dma_sg_dmabuf_ops_detach(struct dma_buf *dbuf,
	struct dma_buf_attachment *db_attach)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

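/*
 * Map the attachment's scatterlist for the importing device. The mapping
 * is cached in the attachment: remapping with the same direction returns
 * the cached table, while a direction change unmaps and remaps it.
 */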
static struct sg_table *vb2_dma_sg_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_attachment *attach = db_attach->priv;
	/* stealing dmabuf mutex to serialize map/unmap operations */
	struct mutex *lock = &db_attach->dmabuf->lock;
	struct sg_table *sgt;

	mutex_lock(lock);

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir) {
		mutex_unlock(lock);
		return sgt;
	}

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
			attach->dma_dir);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	sgt->nents = dma_map_sg(db_attach->dev, sgt->sgl, sgt->orig_nents,
				dma_dir);
	if (!sgt->nents) {
		pr_err("failed to map scatterlist\n");
		mutex_unlock(lock);
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	mutex_unlock(lock);

	return sgt;
}

static void vb2_dma_sg_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dma_sg_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dma_sg_get_dmabuf */
	vb2_dma_sg_put(dbuf->priv);
}

static void *vb2_dma_sg_dmabuf_ops_kmap(struct dma_buf *dbuf, unsigned long pgnum)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;

	return buf->vaddr ? buf->vaddr + pgnum * PAGE_SIZE : NULL;
}

static void *vb2_dma_sg_dmabuf_ops_vmap(struct dma_buf *dbuf)
{
	struct vb2_dma_sg_buf *buf = dbuf->priv;

	return vb2_dma_sg_vaddr(buf);
}

static int vb2_dma_sg_dmabuf_ops_mmap(struct dma_buf *dbuf,
	struct vm_area_struct *vma)
{
	return vb2_dma_sg_mmap(dbuf->priv, vma);
}

static struct dma_buf_ops vb2_dma_sg_dmabuf_ops = {
	.attach = vb2_dma_sg_dmabuf_ops_attach,
	.detach = vb2_dma_sg_dmabuf_ops_detach,
	.map_dma_buf = vb2_dma_sg_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dma_sg_dmabuf_ops_unmap,
	.kmap = vb2_dma_sg_dmabuf_ops_kmap,
	.kmap_atomic = vb2_dma_sg_dmabuf_ops_kmap,
	.vmap = vb2_dma_sg_dmabuf_ops_vmap,
	.mmap = vb2_dma_sg_dmabuf_ops_mmap,
	.release = vb2_dma_sg_dmabuf_ops_release,
};

static struct dma_buf *vb2_dma_sg_get_dmabuf(void *buf_priv, unsigned long flags)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dma_sg_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->dma_sgt))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	atomic_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

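/*
 * Pin an imported DMABUF for I/O: ask the exporter, through the attachment
 * created in attach_dmabuf(), for a scatterlist already mapped for our
 * device. For DMABUF buffers, buf->dma_sgt doubles as the "pinned" flag.
 */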
static int vb2_dma_sg_map_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non-attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dma_sg_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a non-attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap(buf->db_attach->dmabuf, buf->vaddr);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_sgt = NULL;
}

static void vb2_dma_sg_detach_dmabuf(void *mem_priv)
{
	struct vb2_dma_sg_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach a mapped buffer */
	if (WARN_ON(buf->dma_sgt))
		vb2_dma_sg_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dma_sg_attach_dmabuf(void *alloc_ctx, struct dma_buf *dbuf,
	unsigned long size, enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_conf *conf = alloc_ctx;
	struct vb2_dma_sg_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = conf->dev;
	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

static void *vb2_dma_sg_cookie(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;

	return buf->dma_sgt;
}

const struct vb2_mem_ops vb2_dma_sg_memops = {
	.alloc		= vb2_dma_sg_alloc,
	.put		= vb2_dma_sg_put,
	.get_userptr	= vb2_dma_sg_get_userptr,
	.put_userptr	= vb2_dma_sg_put_userptr,
	.prepare	= vb2_dma_sg_prepare,
	.finish		= vb2_dma_sg_finish,
	.vaddr		= vb2_dma_sg_vaddr,
	.mmap		= vb2_dma_sg_mmap,
	.num_users	= vb2_dma_sg_num_users,
	.get_dmabuf	= vb2_dma_sg_get_dmabuf,
	.map_dmabuf	= vb2_dma_sg_map_dmabuf,
	.unmap_dmabuf	= vb2_dma_sg_unmap_dmabuf,
	.attach_dmabuf	= vb2_dma_sg_attach_dmabuf,
	.detach_dmabuf	= vb2_dma_sg_detach_dmabuf,
	.cookie		= vb2_dma_sg_cookie,
};
EXPORT_SYMBOL_GPL(vb2_dma_sg_memops);

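/*
 * vb2_dma_sg_init_ctx - create an allocator context bound to @dev
 *
 * The context only stores the device pointer; every buffer allocated
 * through it is DMA-mapped on that device. Returns an ERR_PTR() on
 * allocation failure, so callers should check the result with IS_ERR().
 */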
void *vb2_dma_sg_init_ctx(struct device *dev)
{
	struct vb2_dma_sg_conf *conf;

	conf = kzalloc(sizeof(*conf), GFP_KERNEL);
	if (!conf)
		return ERR_PTR(-ENOMEM);

	conf->dev = dev;

	return conf;
}
EXPORT_SYMBOL_GPL(vb2_dma_sg_init_ctx);

void vb2_dma_sg_cleanup_ctx(void *alloc_ctx)
{
	if (!IS_ERR_OR_NULL(alloc_ctx))
		kfree(alloc_ctx);
}
EXPORT_SYMBOL_GPL(vb2_dma_sg_cleanup_ctx);

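/*
 * Typical driver wiring, as a minimal sketch (the "my_drv" names below are
 * illustrative, not part of this file):
 *
 *	my_drv->alloc_ctx = vb2_dma_sg_init_ctx(&pdev->dev);
 *	if (IS_ERR(my_drv->alloc_ctx))
 *		return PTR_ERR(my_drv->alloc_ctx);
 *
 *	my_drv->queue.mem_ops = &vb2_dma_sg_memops;
 *
 *	// in the driver's .queue_setup() callback:
 *	alloc_ctxs[0] = my_drv->alloc_ctx;
 *
 *	// and on driver teardown:
 *	vb2_dma_sg_cleanup_ctx(my_drv->alloc_ctx);
 */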
MODULE_DESCRIPTION("dma scatter/gather memory handling routines for videobuf2");
MODULE_AUTHOR("Andrzej Pietrasiewicz");
MODULE_LICENSE("GPL");