/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/dma-resv.h>
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	void				*cookie;
	dma_addr_t			dma_addr;
	unsigned long			attrs;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;
	struct frame_vector		*vec;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	refcount_t			refcount;
	struct sg_table			*sgt_base;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;

	struct vb2_buffer		*vb;
	bool				non_coherent_mem;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sgtable_dma_sg(sgt, s, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected += sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}

/*
 * This function may fail if:
 *
 * - dma_buf_vmap() fails
 *   E.g. due to lack of virtual mapping address space, or due to
 *   dmabuf->ops misconfiguration.
 *
 * - dma_vmap_noncontiguous() fails
 *   For instance, when requested buffer size is larger than totalram_pages().
 *   Relevant for buffers that use non-coherent memory.
 *
 * - Queue DMA attrs have DMA_ATTR_NO_KERNEL_MAPPING set
 *   Relevant for buffers that use coherent memory.
 */
static void *vb2_dc_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (buf->vaddr)
		return buf->vaddr;

	if (buf->db_attach) {
		struct iosys_map map;

		if (!dma_buf_vmap_unlocked(buf->db_attach->dmabuf, &map))
			buf->vaddr = map.vaddr;

		return buf->vaddr;
	}

	if (buf->non_coherent_mem)
		buf->vaddr = dma_vmap_noncontiguous(buf->dev, buf->size,
						    buf->dma_sgt);
	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* This takes care of DMABUF and user-enforced cache sync hint */
	if (buf->vb->skip_cache_sync_on_prepare)
		return;

	if (!buf->non_coherent_mem)
		return;

	/* Non-coherent MMAP only */
	if (buf->vaddr)
		flush_kernel_vmap_range(buf->vaddr, buf->size);

	/* For both USERPTR and non-coherent MMAP */
	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* This takes care of DMABUF and user-enforced cache sync hint */
	if (buf->vb->skip_cache_sync_on_finish)
		return;

	if (!buf->non_coherent_mem)
		return;

	/* Non-coherent MMAP only */
	if (buf->vaddr)
		invalidate_kernel_vmap_range(buf->vaddr, buf->size);

	/* For both USERPTR and non-coherent MMAP */
	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!refcount_dec_and_test(&buf->refcount))
		return;

	if (buf->non_coherent_mem) {
		if (buf->vaddr)
			dma_vunmap_noncontiguous(buf->dev, buf->vaddr);
		dma_free_noncontiguous(buf->dev, buf->size,
				       buf->dma_sgt, buf->dma_dir);
	} else {
		if (buf->sgt_base) {
			sg_free_table(buf->sgt_base);
			kfree(buf->sgt_base);
		}
		dma_free_attrs(buf->dev, buf->size, buf->cookie,
			       buf->dma_addr, buf->attrs);
	}
	put_device(buf->dev);
	kfree(buf);
}

static int vb2_dc_alloc_coherent(struct vb2_dc_buf *buf)
{
	struct vb2_queue *q = buf->vb->vb2_queue;

	buf->cookie = dma_alloc_attrs(buf->dev,
				      buf->size,
				      &buf->dma_addr,
				      GFP_KERNEL | q->gfp_flags,
				      buf->attrs);
	if (!buf->cookie)
		return -ENOMEM;

	if (q->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return 0;

	buf->vaddr = buf->cookie;
	return 0;
}

static int vb2_dc_alloc_non_coherent(struct vb2_dc_buf *buf)
{
	struct vb2_queue *q = buf->vb->vb2_queue;

	buf->dma_sgt = dma_alloc_noncontiguous(buf->dev,
					       buf->size,
					       buf->dma_dir,
					       GFP_KERNEL | q->gfp_flags,
					       buf->attrs);
	if (!buf->dma_sgt)
		return -ENOMEM;

	buf->dma_addr = sg_dma_address(buf->dma_sgt->sgl);

	/*
	 * For non-coherent buffers the kernel mapping is created on demand
	 * in vb2_dc_vaddr().
	 */
	return 0;
}

static void *vb2_dc_alloc(struct vb2_buffer *vb,
			  struct device *dev,
			  unsigned long size)
{
	struct vb2_dc_buf *buf;
	int ret;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->attrs = vb->vb2_queue->dma_attrs;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->vb = vb;
	buf->non_coherent_mem = vb->vb2_queue->non_coherent_mem;

	buf->size = size;
	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);

	if (buf->non_coherent_mem)
		ret = vb2_dc_alloc_non_coherent(buf);
	else
		ret = vb2_dc_alloc_coherent(buf);

	if (ret) {
		dev_err(dev, "dma alloc of size %lu failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	if (buf->non_coherent_mem)
		ret = dma_mmap_noncontiguous(buf->dev, vma, buf->size,
					     buf->dma_sgt);
	else
		ret = dma_mmap_attrs(buf->dev, vma, buf->cookie, buf->dma_addr,
				     buf->size, buf->attrs);
	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %lu\n",
		 __func__, (unsigned long)buf->dma_addr, vma->vm_start,
		 buf->size);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
				    struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->base_sgt scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
				     struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		/*
		 * Cache sync can be skipped here, as the vb2_dc memory is
		 * allocated from device coherent memory, which means the
		 * memory locations do not require any explicit cache
		 * maintenance prior or after being used by the device.
		 */
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir)
		return sgt;

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
		attach->dma_dir = DMA_NONE;
	}

	/*
	 * mapping to the client with new direction, no cache sync
	 * required see comment in vb2_dc_dmabuf_ops_detach()
	 */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC)) {
		pr_err("failed to map scatterlist\n");
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
				    struct sg_table *sgt,
				    enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static int
vb2_dc_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
				   enum dma_data_direction direction)
{
	return 0;
}

static int
vb2_dc_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
				 enum dma_data_direction direction)
{
	return 0;
}

static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct iosys_map *map)
{
	struct vb2_dc_buf *buf;
	void *vaddr;

	buf = dbuf->priv;
	vaddr = vb2_dc_vaddr(buf->vb, buf);
	if (!vaddr)
		return -EINVAL;

	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
				  struct vm_area_struct *vma)
{
	dma_resv_assert_held(dbuf->resv);

	return vb2_dc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.begin_cpu_access = vb2_dc_dmabuf_ops_begin_cpu_access,
	.end_cpu_access = vb2_dc_dmabuf_ops_end_cpu_access,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	if (buf->non_coherent_mem)
		return buf->dma_sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
				    buf->size, buf->attrs);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(struct vb2_buffer *vb,
					 void *buf_priv,
					 unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;
	int i;
	struct page **pages;

	if (sgt) {
		/*
		 * No need to sync to CPU, it's already synced to the CPU
		 * since the finish() memop will have been called before this.
		 */
		dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
		pages = frame_vector_pages(buf->vec);
		/* sgt should exist only if vector contains pages... */
		BUG_ON(IS_ERR(pages));
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL)
			for (i = 0; i < frame_vector_count(buf->vec); i++)
				set_page_dirty_lock(pages[i]);
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		dma_unmap_resource(buf->dev, buf->dma_addr, buf->size,
				   buf->dma_dir, 0);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_dc_get_userptr(struct vb2_buffer *vb, struct device *dev,
				unsigned long vaddr, unsigned long size)
{
	struct vb2_dc_buf *buf;
	struct frame_vector *vec;
	unsigned int offset;
	int n_pages, i;
	int ret = 0;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->vb = vb;

	offset = lower_32_bits(offset_in_page(vaddr));
	vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE ||
					       buf->dma_dir == DMA_BIDIRECTIONAL);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_buf;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	ret = frame_vector_to_pages(vec);
	if (ret < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * Failed to convert to pages... Check the memory is physically
		 * contiguous and use direct mapping
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_pfnvec;
		buf->dma_addr = dma_map_resource(buf->dev,
				__pfn_to_phys(nums[0]), size, buf->dma_dir, 0);
		if (dma_mapping_error(buf->dev, buf->dma_addr)) {
			ret = -ENOMEM;
			goto fail_pfnvec;
		}
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_pfnvec;
	}

	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
					offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC)) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
		       contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->non_coherent_mem = 1;

out:
	buf->size = size;

	return buf;

fail_map_sg:
	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_pfnvec:
	vb2_destroy_framevec(vec);

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment_unlocked(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu\n",
		       contig_size, buf->size);
		dma_buf_unmap_attachment_unlocked(buf->db_attach, sgt,
						  buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap_unlocked(buf->db_attach->dmabuf, &map);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment_unlocked(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
				  struct dma_buf *dbuf, unsigned long size)
{
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->vb = vb;

	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc		= vb2_dc_alloc,
	.put		= vb2_dc_put,
	.get_dmabuf	= vb2_dc_get_dmabuf,
	.cookie		= vb2_dc_cookie,
	.vaddr		= vb2_dc_vaddr,
	.mmap		= vb2_dc_mmap,
	.get_userptr	= vb2_dc_get_userptr,
	.put_userptr	= vb2_dc_put_userptr,
	.prepare	= vb2_dc_prepare,
	.finish		= vb2_dc_finish,
	.map_dmabuf	= vb2_dc_map_dmabuf,
	.unmap_dmabuf	= vb2_dc_unmap_dmabuf,
	.attach_dmabuf	= vb2_dc_attach_dmabuf,
	.detach_dmabuf	= vb2_dc_detach_dmabuf,
	.num_users	= vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
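
/*
 * A minimal sketch of how a driver wires this allocator into its vb2_queue
 * before calling vb2_queue_init(); the vb2_queue fields and the
 * vb2_dma_contig_memops table are real, but the surrounding driver context
 * (pdev, q, ret) is hypothetical:
 *
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
 *	q->dev = &pdev->dev;	// the device that performs the DMA
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	ret = vb2_queue_init(q);
 */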

/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev:	device for configuring DMA parameters
 * @size:	size of DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left with the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers) and there is no good
 * place to set it to the proper value.
 * This function should be called from the drivers, which are known to
 * operate on platforms with IOMMU and provide access to shared buffers
 * (either USERPTR or DMABUF). This should be done before initializing
 * the videobuf2 queue.
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (!dev->dma_parms) {
		dev_err(dev, "Failed to set max_seg_size: dma_parms is NULL\n");
		return -ENODEV;
	}
	if (dma_get_max_seg_size(dev) < size)
		return dma_set_max_seg_size(dev, size);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
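
/*
 * A minimal sketch of the intended call order, assuming a hypothetical
 * probe() on an IOMMU-backed platform; the UINT_MAX value is illustrative,
 * any size covering the largest expected buffer works:
 *
 *	ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, UINT_MAX);
 *	if (ret)
 *		return ret;
 *	// ... then set up and init the vb2_queue that uses
 *	// vb2_dma_contig_memops with the same struct device.
 */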

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(DMA_BUF);