mmap locking API: use coccinelle to convert mmap_sem rwsem call sites
[linux-block.git] / drivers / media / v4l2-core / videobuf-dma-contig.c
CommitLineData
77512baa 1// SPDX-License-Identifier: GPL-2.0-only
2cc45cf2
MD
2/*
3 * helper functions for physically contiguous capture buffers
4 *
5 * The functions support hardware lacking scatter gather support
6 * (i.e. the buffers must be linear in physical memory)
7 *
8 * Copyright (c) 2008 Magnus Damm
9 *
10 * Based on videobuf-vmalloc.c,
32590819 11 * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
2cc45cf2
MD
12 */
13
14#include <linux/init.h>
15#include <linux/module.h>
f19ad390 16#include <linux/mm.h>
720b17e7 17#include <linux/pagemap.h>
2cc45cf2 18#include <linux/dma-mapping.h>
f39c1ab3 19#include <linux/sched.h>
5a0e3ad6 20#include <linux/slab.h>
2cc45cf2
MD
21#include <media/videobuf-dma-contig.h>
22
/* Per-buffer private state for a physically contiguous allocation. */
struct videobuf_dma_contig_memory {
	u32 magic;		/* set to MAGIC_DC_MEM while the struct is valid */
	void *vaddr;		/* kernel virtual address; NULL when not allocated */
	dma_addr_t dma_handle;	/* bus/DMA address handed to the device */
	unsigned long size;	/* size of the allocation, in bytes */
};

/* Magic value used to detect type confusion on the priv pointer. */
#define MAGIC_DC_MEM 0x0733ac61
c60f2b5c
GL
/*
 * Verify the per-buffer magic value; a mismatch means the priv pointer
 * does not reference a videobuf_dma_contig_memory, so BUG() immediately
 * rather than corrupt memory.
 *
 * Wrapped in do { } while (0) so the macro expands to exactly one
 * statement and composes safely with unbraced if/else at call sites.
 */
#define MAGIC_CHECK(is, should)						    \
	do {								    \
		if (unlikely((is) != (should))) {			    \
			pr_err("magic mismatch: %x expected %x\n",	    \
			       (is), (should));				    \
			BUG();						    \
		}							    \
	} while (0)
36
a8f3c203
FV
37static int __videobuf_dc_alloc(struct device *dev,
38 struct videobuf_dma_contig_memory *mem,
50fbe32c 39 unsigned long size, gfp_t flags)
a8f3c203
FV
40{
41 mem->size = size;
cb132cd5
MCC
42 mem->vaddr = dma_alloc_coherent(dev, mem->size,
43 &mem->dma_handle, flags);
a8f3c203
FV
44
45 if (!mem->vaddr) {
46 dev_err(dev, "memory alloc size %ld failed\n", mem->size);
47 return -ENOMEM;
48 }
49
50 dev_dbg(dev, "dma mapped data is at %p (%ld)\n", mem->vaddr, mem->size);
51
52 return 0;
53}
54
55static void __videobuf_dc_free(struct device *dev,
56 struct videobuf_dma_contig_memory *mem)
57{
cb132cd5 58 dma_free_coherent(dev, mem->size, mem->vaddr, mem->dma_handle);
a8f3c203
FV
59
60 mem->vaddr = NULL;
61}
62
63static void videobuf_vm_open(struct vm_area_struct *vma)
2cc45cf2
MD
64{
65 struct videobuf_mapping *map = vma->vm_private_data;
66
cca36e2e 67 dev_dbg(map->q->dev, "vm_open %p [count=%u,vma=%08lx-%08lx]\n",
2cc45cf2
MD
68 map, map->count, vma->vm_start, vma->vm_end);
69
70 map->count++;
71}
72
/*
 * VMA close handler: drop one mapping reference; when the last reference
 * goes away, cancel any active streaming, free the kernel-allocated
 * buffers that belonged to this mapping, and release the mapping itself.
 * All queue state is touched under the videobuf queue lock.
 */
static void videobuf_vm_close(struct vm_area_struct *vma)
{
	struct videobuf_mapping *map = vma->vm_private_data;
	struct videobuf_queue *q = map->q;
	int i;

	dev_dbg(q->dev, "vm_close %p [count=%u,vma=%08lx-%08lx]\n",
		map, map->count, vma->vm_start, vma->vm_end);

	map->count--;
	if (0 == map->count) {
		struct videobuf_dma_contig_memory *mem;

		dev_dbg(q->dev, "munmap %p q=%p\n", map, q);
		videobuf_queue_lock(q);

		/* We need first to cancel streams, before unmapping */
		if (q->streaming)
			videobuf_queue_cancel(q);

		/* Walk every slot and tear down only buffers owned by
		 * this particular mapping. */
		for (i = 0; i < VIDEO_MAX_FRAME; i++) {
			if (NULL == q->bufs[i])
				continue;

			if (q->bufs[i]->map != map)
				continue;

			mem = q->bufs[i]->priv;
			if (mem) {
				/* This callback is called only if kernel has
				   allocated memory and this memory is mmapped.
				   In this case, memory should be freed,
				   in order to do memory unmap.
				 */

				MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);

				/* vfree is not atomic - can't be
				   called with IRQ's disabled
				 */
				dev_dbg(q->dev, "buf[%d] freeing %p\n",
					i, mem->vaddr);

				__videobuf_dc_free(q->dev, mem);
				/* redundant with __videobuf_dc_free(), kept
				 * for clarity at the call site */
				mem->vaddr = NULL;
			}

			/* detach the buffer from the now-dead mapping */
			q->bufs[i]->map = NULL;
			q->bufs[i]->baddr = 0;
		}

		kfree(map);

		videobuf_queue_unlock(q);
	}
}
129
/* VMA callbacks installed on every mmap()ed videobuf-dma-contig buffer;
 * open/close maintain the mapping reference count. */
static const struct vm_operations_struct videobuf_vm_ops = {
	.open = videobuf_vm_open,
	.close = videobuf_vm_close,
};
134
720b17e7
MD
135/**
136 * videobuf_dma_contig_user_put() - reset pointer to user space buffer
137 * @mem: per-buffer private videobuf-dma-contig data
138 *
139 * This function resets the user space pointer
140 */
141static void videobuf_dma_contig_user_put(struct videobuf_dma_contig_memory *mem)
142{
720b17e7
MD
143 mem->dma_handle = 0;
144 mem->size = 0;
145}
146
147/**
148 * videobuf_dma_contig_user_get() - setup user space memory pointer
149 * @mem: per-buffer private videobuf-dma-contig data
150 * @vb: video buffer to map
151 *
152 * This function validates and sets up a pointer to user space memory.
153 * Only physically contiguous pfn-mapped memory is accepted.
154 *
155 * Returns 0 if successful.
156 */
157static int videobuf_dma_contig_user_get(struct videobuf_dma_contig_memory *mem,
158 struct videobuf_buffer *vb)
159{
e275faf3 160 unsigned long untagged_baddr = untagged_addr(vb->baddr);
720b17e7
MD
161 struct mm_struct *mm = current->mm;
162 struct vm_area_struct *vma;
163 unsigned long prev_pfn, this_pfn;
164 unsigned long pages_done, user_address;
31bedfa5 165 unsigned int offset;
720b17e7
MD
166 int ret;
167
e275faf3 168 offset = untagged_baddr & ~PAGE_MASK;
31bedfa5 169 mem->size = PAGE_ALIGN(vb->size + offset);
720b17e7
MD
170 ret = -EINVAL;
171
d8ed45c5 172 mmap_read_lock(mm);
720b17e7 173
e275faf3 174 vma = find_vma(mm, untagged_baddr);
720b17e7
MD
175 if (!vma)
176 goto out_up;
177
e275faf3 178 if ((untagged_baddr + mem->size) > vma->vm_end)
720b17e7
MD
179 goto out_up;
180
181 pages_done = 0;
182 prev_pfn = 0; /* kill warning */
e275faf3 183 user_address = untagged_baddr;
720b17e7
MD
184
185 while (pages_done < (mem->size >> PAGE_SHIFT)) {
186 ret = follow_pfn(vma, user_address, &this_pfn);
187 if (ret)
188 break;
189
190 if (pages_done == 0)
31bedfa5 191 mem->dma_handle = (this_pfn << PAGE_SHIFT) + offset;
720b17e7
MD
192 else if (this_pfn != (prev_pfn + 1))
193 ret = -EFAULT;
194
195 if (ret)
196 break;
197
198 prev_pfn = this_pfn;
199 user_address += PAGE_SIZE;
200 pages_done++;
201 }
202
a8f3c203 203out_up:
d8ed45c5 204 mmap_read_unlock(current->mm);
720b17e7
MD
205
206 return ret;
207}
208
cb132cd5 209static struct videobuf_buffer *__videobuf_alloc(size_t size)
2cc45cf2
MD
210{
211 struct videobuf_dma_contig_memory *mem;
212 struct videobuf_buffer *vb;
213
214 vb = kzalloc(size + sizeof(*mem), GFP_KERNEL);
215 if (vb) {
a8f3c203
FV
216 vb->priv = ((char *)vb) + size;
217 mem = vb->priv;
2cc45cf2
MD
218 mem->magic = MAGIC_DC_MEM;
219 }
220
221 return vb;
222}
223
037c75eb 224static void *__videobuf_to_vaddr(struct videobuf_buffer *buf)
2cc45cf2
MD
225{
226 struct videobuf_dma_contig_memory *mem = buf->priv;
227
228 BUG_ON(!mem);
229 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
230
231 return mem->vaddr;
232}
233
234static int __videobuf_iolock(struct videobuf_queue *q,
235 struct videobuf_buffer *vb,
236 struct v4l2_framebuffer *fbuf)
237{
238 struct videobuf_dma_contig_memory *mem = vb->priv;
239
240 BUG_ON(!mem);
241 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
242
243 switch (vb->memory) {
244 case V4L2_MEMORY_MMAP:
245 dev_dbg(q->dev, "%s memory method MMAP\n", __func__);
246
247 /* All handling should be done by __videobuf_mmap_mapper() */
248 if (!mem->vaddr) {
4faf7066 249 dev_err(q->dev, "memory is not allocated/mmapped.\n");
2cc45cf2
MD
250 return -EINVAL;
251 }
252 break;
253 case V4L2_MEMORY_USERPTR:
254 dev_dbg(q->dev, "%s memory method USERPTR\n", __func__);
255
720b17e7 256 /* handle pointer from user space */
2cc45cf2 257 if (vb->baddr)
720b17e7 258 return videobuf_dma_contig_user_get(mem, vb);
2cc45cf2 259
720b17e7 260 /* allocate memory for the read() method */
a8f3c203
FV
261 if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(vb->size),
262 GFP_KERNEL))
2cc45cf2 263 return -ENOMEM;
2cc45cf2
MD
264 break;
265 case V4L2_MEMORY_OVERLAY:
266 default:
a8f3c203 267 dev_dbg(q->dev, "%s memory method OVERLAY/unknown\n", __func__);
2cc45cf2
MD
268 return -EINVAL;
269 }
270
271 return 0;
272}
273
2cc45cf2 274static int __videobuf_mmap_mapper(struct videobuf_queue *q,
0b62b737 275 struct videobuf_buffer *buf,
2cc45cf2
MD
276 struct vm_area_struct *vma)
277{
278 struct videobuf_dma_contig_memory *mem;
279 struct videobuf_mapping *map;
2cc45cf2 280 int retval;
2cc45cf2
MD
281
282 dev_dbg(q->dev, "%s\n", __func__);
2cc45cf2
MD
283
284 /* create mapping + update buffer list */
285 map = kzalloc(sizeof(struct videobuf_mapping), GFP_KERNEL);
286 if (!map)
287 return -ENOMEM;
288
0b62b737 289 buf->map = map;
2cc45cf2
MD
290 map->q = q;
291
0b62b737 292 buf->baddr = vma->vm_start;
2cc45cf2 293
0b62b737 294 mem = buf->priv;
2cc45cf2
MD
295 BUG_ON(!mem);
296 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
297
a8f3c203
FV
298 if (__videobuf_dc_alloc(q->dev, mem, PAGE_ALIGN(buf->bsize),
299 GFP_KERNEL | __GFP_COMP))
2cc45cf2 300 goto error;
2cc45cf2
MD
301
302 /* Try to remap memory */
cb132cd5 303 vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
8a6a547f
FF
304
305 /* the "vm_pgoff" is just used in v4l2 to find the
306 * corresponding buffer data structure which is allocated
307 * earlier and it does not mean the offset from the physical
308 * buffer start address as usual. So set it to 0 to pass
309 * the sanity check in vm_iomap_memory().
310 */
311 vma->vm_pgoff = 0;
312
83570621 313 retval = vm_iomap_memory(vma, mem->dma_handle, mem->size);
cb132cd5
MCC
314 if (retval) {
315 dev_err(q->dev, "mmap: remap failed with error %d. ",
316 retval);
317 dma_free_coherent(q->dev, mem->size,
318 mem->vaddr, mem->dma_handle);
319 goto error;
2cc45cf2
MD
320 }
321
a8f3c203
FV
322 vma->vm_ops = &videobuf_vm_ops;
323 vma->vm_flags |= VM_DONTEXPAND;
2cc45cf2
MD
324 vma->vm_private_data = map;
325
326 dev_dbg(q->dev, "mmap %p: q=%p %08lx-%08lx (%lx) pgoff %08lx buf %d\n",
327 map, q, vma->vm_start, vma->vm_end,
a8f3c203 328 (long int)buf->bsize, vma->vm_pgoff, buf->i);
2cc45cf2
MD
329
330 videobuf_vm_open(vma);
331
332 return 0;
333
334error:
335 kfree(map);
336 return -ENOMEM;
337}
338
/* Allocator operations hooked into the videobuf core for dma-contig. */
static struct videobuf_qtype_ops qops = {
	.magic = MAGIC_QTYPE_OPS,
	.alloc_vb = __videobuf_alloc,
	.iolock = __videobuf_iolock,
	.mmap_mapper = __videobuf_mmap_mapper,
	.vaddr = __videobuf_to_vaddr,
};
346
/*
 * Public entry point: initialize @q as a videobuf queue backed by the
 * dma-contig allocator. Pure pass-through to videobuf_queue_core_init()
 * with our qops plugged in; all parameters are forwarded unchanged.
 */
void videobuf_queue_dma_contig_init(struct videobuf_queue *q,
				    const struct videobuf_queue_ops *ops,
				    struct device *dev,
				    spinlock_t *irqlock,
				    enum v4l2_buf_type type,
				    enum v4l2_field field,
				    unsigned int msize,
				    void *priv,
				    struct mutex *ext_lock)
{
	videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize,
				 priv, &qops, ext_lock);
}
EXPORT_SYMBOL_GPL(videobuf_queue_dma_contig_init);
361
362dma_addr_t videobuf_to_dma_contig(struct videobuf_buffer *buf)
363{
364 struct videobuf_dma_contig_memory *mem = buf->priv;
365
366 BUG_ON(!mem);
367 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
368
369 return mem->dma_handle;
370}
371EXPORT_SYMBOL_GPL(videobuf_to_dma_contig);
372
373void videobuf_dma_contig_free(struct videobuf_queue *q,
374 struct videobuf_buffer *buf)
375{
376 struct videobuf_dma_contig_memory *mem = buf->priv;
377
378 /* mmapped memory can't be freed here, otherwise mmapped region
379 would be released, while still needed. In this case, the memory
380 release should happen inside videobuf_vm_close().
381 So, it should free memory only if the memory were allocated for
382 read() operation.
383 */
720b17e7 384 if (buf->memory != V4L2_MEMORY_USERPTR)
2cc45cf2
MD
385 return;
386
387 if (!mem)
388 return;
389
390 MAGIC_CHECK(mem->magic, MAGIC_DC_MEM);
391
720b17e7
MD
392 /* handle user space pointer case */
393 if (buf->baddr) {
394 videobuf_dma_contig_user_put(mem);
395 return;
396 }
397
398 /* read() method */
b2b476f5 399 if (mem->vaddr) {
a8f3c203 400 __videobuf_dc_free(q->dev, mem);
b2b476f5
PO
401 mem->vaddr = NULL;
402 }
2cc45cf2
MD
403}
404EXPORT_SYMBOL_GPL(videobuf_dma_contig_free);
405
406MODULE_DESCRIPTION("helper module to manage video4linux dma contig buffers");
407MODULE_AUTHOR("Magnus Damm");
408MODULE_LICENSE("GPL");