// SPDX-License-Identifier: GPL-2.0-only
/*
 * generic helper functions for handling video4linux capture buffers
 *
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
 *
 * Highly based on video-buf written originally by:
 * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
 * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org>
 * (c) 2006 Ted Walther and John Sokol
 */
#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <media/videobuf-core.h>
#include <media/v4l2-common.h>
24 #define MAGIC_BUFFER 0x20070728
25 #define MAGIC_CHECK(is, should) \
27 if (unlikely((is) != (should))) { \
29 "magic mismatch: %x (expected %x)\n", \
36 module_param(debug, int, 0644);
38 MODULE_DESCRIPTION("helper module to manage video4linux buffers");
39 MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
40 MODULE_LICENSE("GPL");
42 #define dprintk(level, fmt, arg...) \
45 printk(KERN_DEBUG "vbuf: " fmt, ## arg); \
48 /* --------------------------------------------------------------------- */
50 #define CALL(q, f, arg...) \
51 ((q->int_ops->f) ? q->int_ops->f(arg) : 0)
52 #define CALLPTR(q, f, arg...) \
53 ((q->int_ops->f) ? q->int_ops->f(arg) : NULL)
55 struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
57 struct videobuf_buffer *vb;
59 BUG_ON(q->msize < sizeof(*vb));
61 if (!q->int_ops || !q->int_ops->alloc_vb) {
62 printk(KERN_ERR "No specific ops defined!\n");
66 vb = q->int_ops->alloc_vb(q->msize);
68 init_waitqueue_head(&vb->done);
69 vb->magic = MAGIC_BUFFER;
74 EXPORT_SYMBOL_GPL(videobuf_alloc_vb);
76 static int state_neither_active_nor_queued(struct videobuf_queue *q,
77 struct videobuf_buffer *vb)
82 spin_lock_irqsave(q->irqlock, flags);
83 rc = vb->state != VIDEOBUF_ACTIVE && vb->state != VIDEOBUF_QUEUED;
84 spin_unlock_irqrestore(q->irqlock, flags);
88 int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
89 int non_blocking, int intr)
94 MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
97 if (state_neither_active_nor_queued(q, vb))
102 is_ext_locked = q->ext_lock && mutex_is_locked(q->ext_lock);
104 /* Release vdev lock to prevent this wait from blocking outside access to
107 mutex_unlock(q->ext_lock);
109 ret = wait_event_interruptible(vb->done,
110 state_neither_active_nor_queued(q, vb));
112 wait_event(vb->done, state_neither_active_nor_queued(q, vb));
115 mutex_lock(q->ext_lock);
119 EXPORT_SYMBOL_GPL(videobuf_waiton);
121 int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
122 struct v4l2_framebuffer *fbuf)
124 MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
125 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
127 return CALL(q, iolock, q, vb, fbuf);
129 EXPORT_SYMBOL_GPL(videobuf_iolock);
131 void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
132 struct videobuf_buffer *buf)
134 if (q->int_ops->vaddr)
135 return q->int_ops->vaddr(buf);
138 EXPORT_SYMBOL_GPL(videobuf_queue_to_vaddr);
140 /* --------------------------------------------------------------------- */
143 void videobuf_queue_core_init(struct videobuf_queue *q,
144 const struct videobuf_queue_ops *ops,
147 enum v4l2_buf_type type,
148 enum v4l2_field field,
151 struct videobuf_qtype_ops *int_ops,
152 struct mutex *ext_lock)
155 memset(q, 0, sizeof(*q));
156 q->irqlock = irqlock;
157 q->ext_lock = ext_lock;
164 q->int_ops = int_ops;
166 /* All buffer operations are mandatory */
167 BUG_ON(!q->ops->buf_setup);
168 BUG_ON(!q->ops->buf_prepare);
169 BUG_ON(!q->ops->buf_queue);
170 BUG_ON(!q->ops->buf_release);
172 /* Lock is mandatory for queue_cancel to work */
175 /* Having implementations for abstract methods are mandatory */
178 mutex_init(&q->vb_lock);
179 init_waitqueue_head(&q->wait);
180 INIT_LIST_HEAD(&q->stream);
182 EXPORT_SYMBOL_GPL(videobuf_queue_core_init);
184 /* Locking: Only usage in bttv unsafe find way to remove */
185 int videobuf_queue_is_busy(struct videobuf_queue *q)
189 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
192 dprintk(1, "busy: streaming active\n");
196 dprintk(1, "busy: pending read #1\n");
200 dprintk(1, "busy: pending read #2\n");
203 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
204 if (NULL == q->bufs[i])
206 if (q->bufs[i]->map) {
207 dprintk(1, "busy: buffer #%d mapped\n", i);
210 if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
211 dprintk(1, "busy: buffer #%d queued\n", i);
214 if (q->bufs[i]->state == VIDEOBUF_ACTIVE) {
215 dprintk(1, "busy: buffer #%d active\n", i);
221 EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);
224 * __videobuf_free() - free all the buffers and their control structures
226 * This function can only be called if streaming/reading is off, i.e. no buffers
227 * are under control of the driver.
229 /* Locking: Caller holds q->vb_lock */
230 static int __videobuf_free(struct videobuf_queue *q)
234 dprintk(1, "%s\n", __func__);
238 if (q->streaming || q->reading) {
239 dprintk(1, "Cannot free buffers when streaming or reading\n");
243 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
245 for (i = 0; i < VIDEO_MAX_FRAME; i++)
246 if (q->bufs[i] && q->bufs[i]->map) {
247 dprintk(1, "Cannot free mmapped buffers\n");
251 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
252 if (NULL == q->bufs[i])
254 q->ops->buf_release(q, q->bufs[i]);
262 /* Locking: Caller holds q->vb_lock */
263 void videobuf_queue_cancel(struct videobuf_queue *q)
265 unsigned long flags = 0;
270 wake_up_interruptible_sync(&q->wait);
272 /* remove queued buffers from list */
273 spin_lock_irqsave(q->irqlock, flags);
274 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
275 if (NULL == q->bufs[i])
277 if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
278 list_del(&q->bufs[i]->queue);
279 q->bufs[i]->state = VIDEOBUF_ERROR;
280 wake_up_all(&q->bufs[i]->done);
283 spin_unlock_irqrestore(q->irqlock, flags);
285 /* free all buffers + clear queue */
286 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
287 if (NULL == q->bufs[i])
289 q->ops->buf_release(q, q->bufs[i]);
291 INIT_LIST_HEAD(&q->stream);
293 EXPORT_SYMBOL_GPL(videobuf_queue_cancel);
295 /* --------------------------------------------------------------------- */
297 /* Locking: Caller holds q->vb_lock */
298 enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
300 enum v4l2_field field = q->field;
302 BUG_ON(V4L2_FIELD_ANY == field);
304 if (V4L2_FIELD_ALTERNATE == field) {
305 if (V4L2_FIELD_TOP == q->last) {
306 field = V4L2_FIELD_BOTTOM;
307 q->last = V4L2_FIELD_BOTTOM;
309 field = V4L2_FIELD_TOP;
310 q->last = V4L2_FIELD_TOP;
315 EXPORT_SYMBOL_GPL(videobuf_next_field);
317 /* Locking: Caller holds q->vb_lock */
318 static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
319 struct videobuf_buffer *vb, enum v4l2_buf_type type)
321 MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
322 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
327 b->memory = vb->memory;
329 case V4L2_MEMORY_MMAP:
330 b->m.offset = vb->boff;
331 b->length = vb->bsize;
333 case V4L2_MEMORY_USERPTR:
334 b->m.userptr = vb->baddr;
335 b->length = vb->bsize;
337 case V4L2_MEMORY_OVERLAY:
338 b->m.offset = vb->boff;
340 case V4L2_MEMORY_DMABUF:
341 /* DMABUF is not handled in videobuf framework */
345 b->flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
347 b->flags |= V4L2_BUF_FLAG_MAPPED;
350 case VIDEOBUF_PREPARED:
351 case VIDEOBUF_QUEUED:
352 case VIDEOBUF_ACTIVE:
353 b->flags |= V4L2_BUF_FLAG_QUEUED;
356 b->flags |= V4L2_BUF_FLAG_ERROR;
359 b->flags |= V4L2_BUF_FLAG_DONE;
361 case VIDEOBUF_NEEDS_INIT:
367 b->field = vb->field;
368 v4l2_buffer_set_timestamp(b, vb->ts);
369 b->bytesused = vb->size;
370 b->sequence = vb->field_count >> 1;
/* Locked wrapper around __videobuf_free() for drivers/ioctl paths. */
int videobuf_mmap_free(struct videobuf_queue *q)
{
	int ret;

	videobuf_queue_lock(q);
	ret = __videobuf_free(q);
	videobuf_queue_unlock(q);
	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_free);
383 /* Locking: Caller holds q->vb_lock */
384 int __videobuf_mmap_setup(struct videobuf_queue *q,
385 unsigned int bcount, unsigned int bsize,
386 enum v4l2_memory memory)
391 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
393 err = __videobuf_free(q);
397 /* Allocate and initialize buffers */
398 for (i = 0; i < bcount; i++) {
399 q->bufs[i] = videobuf_alloc_vb(q);
401 if (NULL == q->bufs[i])
405 q->bufs[i]->memory = memory;
406 q->bufs[i]->bsize = bsize;
408 case V4L2_MEMORY_MMAP:
409 q->bufs[i]->boff = PAGE_ALIGN(bsize) * i;
411 case V4L2_MEMORY_USERPTR:
412 case V4L2_MEMORY_OVERLAY:
413 case V4L2_MEMORY_DMABUF:
422 dprintk(1, "mmap setup: %d buffers, %d bytes each\n", i, bsize);
426 EXPORT_SYMBOL_GPL(__videobuf_mmap_setup);
428 int videobuf_mmap_setup(struct videobuf_queue *q,
429 unsigned int bcount, unsigned int bsize,
430 enum v4l2_memory memory)
433 videobuf_queue_lock(q);
434 ret = __videobuf_mmap_setup(q, bcount, bsize, memory);
435 videobuf_queue_unlock(q);
438 EXPORT_SYMBOL_GPL(videobuf_mmap_setup);
440 int videobuf_reqbufs(struct videobuf_queue *q,
441 struct v4l2_requestbuffers *req)
443 unsigned int size, count;
446 if (req->memory != V4L2_MEMORY_MMAP &&
447 req->memory != V4L2_MEMORY_USERPTR &&
448 req->memory != V4L2_MEMORY_OVERLAY) {
449 dprintk(1, "reqbufs: memory type invalid\n");
453 videobuf_queue_lock(q);
454 if (req->type != q->type) {
455 dprintk(1, "reqbufs: queue type invalid\n");
461 dprintk(1, "reqbufs: streaming already exists\n");
465 if (!list_empty(&q->stream)) {
466 dprintk(1, "reqbufs: stream running\n");
471 if (req->count == 0) {
472 dprintk(1, "reqbufs: count invalid (%d)\n", req->count);
473 retval = __videobuf_free(q);
478 if (count > VIDEO_MAX_FRAME)
479 count = VIDEO_MAX_FRAME;
481 q->ops->buf_setup(q, &count, &size);
482 dprintk(1, "reqbufs: bufs=%d, size=0x%x [%u pages total]\n",
484 (unsigned int)((count * PAGE_ALIGN(size)) >> PAGE_SHIFT));
486 retval = __videobuf_mmap_setup(q, count, size, req->memory);
488 dprintk(1, "reqbufs: mmap setup returned %d\n", retval);
496 videobuf_queue_unlock(q);
499 EXPORT_SYMBOL_GPL(videobuf_reqbufs);
501 int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
505 videobuf_queue_lock(q);
506 if (unlikely(b->type != q->type)) {
507 dprintk(1, "querybuf: Wrong type.\n");
510 if (unlikely(b->index >= VIDEO_MAX_FRAME)) {
511 dprintk(1, "querybuf: index out of range.\n");
514 if (unlikely(NULL == q->bufs[b->index])) {
515 dprintk(1, "querybuf: buffer is null.\n");
519 videobuf_status(q, b, q->bufs[b->index], q->type);
523 videobuf_queue_unlock(q);
526 EXPORT_SYMBOL_GPL(videobuf_querybuf);
528 int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
530 struct videobuf_buffer *buf;
531 enum v4l2_field field;
532 unsigned long flags = 0;
535 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
537 if (b->memory == V4L2_MEMORY_MMAP)
538 mmap_read_lock(current->mm);
540 videobuf_queue_lock(q);
543 dprintk(1, "qbuf: Reading running...\n");
547 if (b->type != q->type) {
548 dprintk(1, "qbuf: Wrong type.\n");
551 if (b->index >= VIDEO_MAX_FRAME) {
552 dprintk(1, "qbuf: index out of range.\n");
555 buf = q->bufs[b->index];
557 dprintk(1, "qbuf: buffer is null.\n");
560 MAGIC_CHECK(buf->magic, MAGIC_BUFFER);
561 if (buf->memory != b->memory) {
562 dprintk(1, "qbuf: memory type is wrong.\n");
565 if (buf->state != VIDEOBUF_NEEDS_INIT && buf->state != VIDEOBUF_IDLE) {
566 dprintk(1, "qbuf: buffer is already queued or active.\n");
571 case V4L2_MEMORY_MMAP:
572 if (0 == buf->baddr) {
573 dprintk(1, "qbuf: mmap requested but buffer addr is zero!\n");
576 if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
577 || q->type == V4L2_BUF_TYPE_VBI_OUTPUT
578 || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT
579 || q->type == V4L2_BUF_TYPE_SDR_OUTPUT) {
580 buf->size = b->bytesused;
581 buf->field = b->field;
582 buf->ts = v4l2_buffer_get_timestamp(b);
585 case V4L2_MEMORY_USERPTR:
586 if (b->length < buf->bsize) {
587 dprintk(1, "qbuf: buffer length is not enough\n");
590 if (VIDEOBUF_NEEDS_INIT != buf->state &&
591 buf->baddr != b->m.userptr)
592 q->ops->buf_release(q, buf);
593 buf->baddr = b->m.userptr;
595 case V4L2_MEMORY_OVERLAY:
596 buf->boff = b->m.offset;
599 dprintk(1, "qbuf: wrong memory type\n");
603 dprintk(1, "qbuf: requesting next field\n");
604 field = videobuf_next_field(q);
605 retval = q->ops->buf_prepare(q, buf, field);
607 dprintk(1, "qbuf: buffer_prepare returned %d\n", retval);
611 list_add_tail(&buf->stream, &q->stream);
613 spin_lock_irqsave(q->irqlock, flags);
614 q->ops->buf_queue(q, buf);
615 spin_unlock_irqrestore(q->irqlock, flags);
617 dprintk(1, "qbuf: succeeded\n");
619 wake_up_interruptible_sync(&q->wait);
622 videobuf_queue_unlock(q);
624 if (b->memory == V4L2_MEMORY_MMAP)
625 mmap_read_unlock(current->mm);
629 EXPORT_SYMBOL_GPL(videobuf_qbuf);
631 /* Locking: Caller holds q->vb_lock */
632 static int stream_next_buffer_check_queue(struct videobuf_queue *q, int noblock)
638 dprintk(1, "next_buffer: Not streaming\n");
643 if (list_empty(&q->stream)) {
646 dprintk(2, "next_buffer: no buffers to dequeue\n");
649 dprintk(2, "next_buffer: waiting on buffer\n");
651 /* Drop lock to avoid deadlock with qbuf */
652 videobuf_queue_unlock(q);
654 /* Checking list_empty and streaming is safe without
655 * locks because we goto checks to validate while
656 * holding locks before proceeding */
657 retval = wait_event_interruptible(q->wait,
658 !list_empty(&q->stream) || !q->streaming);
659 videobuf_queue_lock(q);
674 /* Locking: Caller holds q->vb_lock */
675 static int stream_next_buffer(struct videobuf_queue *q,
676 struct videobuf_buffer **vb, int nonblocking)
679 struct videobuf_buffer *buf = NULL;
681 retval = stream_next_buffer_check_queue(q, nonblocking);
685 buf = list_entry(q->stream.next, struct videobuf_buffer, stream);
686 retval = videobuf_waiton(q, buf, nonblocking, 1);
695 int videobuf_dqbuf(struct videobuf_queue *q,
696 struct v4l2_buffer *b, int nonblocking)
698 struct videobuf_buffer *buf = NULL;
701 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
703 memset(b, 0, sizeof(*b));
704 videobuf_queue_lock(q);
706 retval = stream_next_buffer(q, &buf, nonblocking);
708 dprintk(1, "dqbuf: next_buffer error: %i\n", retval);
712 switch (buf->state) {
714 dprintk(1, "dqbuf: state is error\n");
717 dprintk(1, "dqbuf: state is done\n");
720 dprintk(1, "dqbuf: state invalid\n");
724 CALL(q, sync, q, buf);
725 videobuf_status(q, b, buf, q->type);
726 list_del(&buf->stream);
727 buf->state = VIDEOBUF_IDLE;
728 b->flags &= ~V4L2_BUF_FLAG_DONE;
730 videobuf_queue_unlock(q);
733 EXPORT_SYMBOL_GPL(videobuf_dqbuf);
735 int videobuf_streamon(struct videobuf_queue *q)
737 struct videobuf_buffer *buf;
738 unsigned long flags = 0;
741 videobuf_queue_lock(q);
749 spin_lock_irqsave(q->irqlock, flags);
750 list_for_each_entry(buf, &q->stream, stream)
751 if (buf->state == VIDEOBUF_PREPARED)
752 q->ops->buf_queue(q, buf);
753 spin_unlock_irqrestore(q->irqlock, flags);
755 wake_up_interruptible_sync(&q->wait);
757 videobuf_queue_unlock(q);
760 EXPORT_SYMBOL_GPL(videobuf_streamon);
762 /* Locking: Caller holds q->vb_lock */
763 static int __videobuf_streamoff(struct videobuf_queue *q)
768 videobuf_queue_cancel(q);
/* Locked wrapper around __videobuf_streamoff() (VIDIOC_STREAMOFF). */
int videobuf_streamoff(struct videobuf_queue *q)
{
	int retval;

	videobuf_queue_lock(q);
	retval = __videobuf_streamoff(q);
	videobuf_queue_unlock(q);

	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_streamoff);
785 /* Locking: Caller holds q->vb_lock */
786 static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
788 size_t count, loff_t *ppos)
790 enum v4l2_field field;
791 unsigned long flags = 0;
794 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
797 q->read_buf = videobuf_alloc_vb(q);
798 if (NULL == q->read_buf)
801 q->read_buf->memory = V4L2_MEMORY_USERPTR;
802 q->read_buf->baddr = (unsigned long)data;
803 q->read_buf->bsize = count;
805 field = videobuf_next_field(q);
806 retval = q->ops->buf_prepare(q, q->read_buf, field);
810 /* start capture & wait */
811 spin_lock_irqsave(q->irqlock, flags);
812 q->ops->buf_queue(q, q->read_buf);
813 spin_unlock_irqrestore(q->irqlock, flags);
814 retval = videobuf_waiton(q, q->read_buf, 0, 0);
816 CALL(q, sync, q, q->read_buf);
817 if (VIDEOBUF_ERROR == q->read_buf->state)
820 retval = q->read_buf->size;
825 q->ops->buf_release(q, q->read_buf);
831 static int __videobuf_copy_to_user(struct videobuf_queue *q,
832 struct videobuf_buffer *buf,
833 char __user *data, size_t count,
836 void *vaddr = CALLPTR(q, vaddr, buf);
838 /* copy to userspace */
839 if (count > buf->size - q->read_off)
840 count = buf->size - q->read_off;
842 if (copy_to_user(data, vaddr + q->read_off, count))
848 static int __videobuf_copy_stream(struct videobuf_queue *q,
849 struct videobuf_buffer *buf,
850 char __user *data, size_t count, size_t pos,
851 int vbihack, int nonblocking)
853 unsigned int *fc = CALLPTR(q, vaddr, buf);
856 /* dirty, undocumented hack -- pass the frame counter
857 * within the last four bytes of each vbi data block.
858 * We need that one to maintain backward compatibility
859 * to all vbi decoding software out there ... */
860 fc += (buf->size >> 2) - 1;
861 *fc = buf->field_count >> 1;
862 dprintk(1, "vbihack: %d\n", *fc);
865 /* copy stuff using the common method */
866 count = __videobuf_copy_to_user(q, buf, data, count, nonblocking);
868 if ((count == -EFAULT) && (pos == 0))
874 ssize_t videobuf_read_one(struct videobuf_queue *q,
875 char __user *data, size_t count, loff_t *ppos,
878 enum v4l2_field field;
879 unsigned long flags = 0;
880 unsigned size = 0, nbufs = 1;
883 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
885 videobuf_queue_lock(q);
887 q->ops->buf_setup(q, &nbufs, &size);
889 if (NULL == q->read_buf &&
892 retval = videobuf_read_zerocopy(q, data, count, ppos);
893 if (retval >= 0 || retval == -EIO)
896 /* fallback to kernel bounce buffer on failures */
899 if (NULL == q->read_buf) {
900 /* need to capture a new frame */
902 q->read_buf = videobuf_alloc_vb(q);
904 dprintk(1, "video alloc=0x%p\n", q->read_buf);
905 if (NULL == q->read_buf)
907 q->read_buf->memory = V4L2_MEMORY_USERPTR;
908 q->read_buf->bsize = count; /* preferred size */
909 field = videobuf_next_field(q);
910 retval = q->ops->buf_prepare(q, q->read_buf, field);
918 spin_lock_irqsave(q->irqlock, flags);
919 q->ops->buf_queue(q, q->read_buf);
920 spin_unlock_irqrestore(q->irqlock, flags);
925 /* wait until capture is done */
926 retval = videobuf_waiton(q, q->read_buf, nonblocking, 1);
930 CALL(q, sync, q, q->read_buf);
932 if (VIDEOBUF_ERROR == q->read_buf->state) {
933 /* catch I/O errors */
934 q->ops->buf_release(q, q->read_buf);
941 /* Copy to userspace */
942 retval = __videobuf_copy_to_user(q, q->read_buf, data, count, nonblocking);
946 q->read_off += retval;
947 if (q->read_off == q->read_buf->size) {
948 /* all data copied, cleanup */
949 q->ops->buf_release(q, q->read_buf);
955 videobuf_queue_unlock(q);
958 EXPORT_SYMBOL_GPL(videobuf_read_one);
960 /* Locking: Caller holds q->vb_lock */
961 static int __videobuf_read_start(struct videobuf_queue *q)
963 enum v4l2_field field;
964 unsigned long flags = 0;
965 unsigned int count = 0, size = 0;
968 q->ops->buf_setup(q, &count, &size);
971 if (count > VIDEO_MAX_FRAME)
972 count = VIDEO_MAX_FRAME;
973 size = PAGE_ALIGN(size);
975 err = __videobuf_mmap_setup(q, count, size, V4L2_MEMORY_USERPTR);
981 for (i = 0; i < count; i++) {
982 field = videobuf_next_field(q);
983 err = q->ops->buf_prepare(q, q->bufs[i], field);
986 list_add_tail(&q->bufs[i]->stream, &q->stream);
988 spin_lock_irqsave(q->irqlock, flags);
989 for (i = 0; i < count; i++)
990 q->ops->buf_queue(q, q->bufs[i]);
991 spin_unlock_irqrestore(q->irqlock, flags);
996 static void __videobuf_read_stop(struct videobuf_queue *q)
1000 videobuf_queue_cancel(q);
1002 INIT_LIST_HEAD(&q->stream);
1003 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
1004 if (NULL == q->bufs[i])
/* Locked wrapper around __videobuf_read_start(). */
int videobuf_read_start(struct videobuf_queue *q)
{
	int rc;

	videobuf_queue_lock(q);
	rc = __videobuf_read_start(q);
	videobuf_queue_unlock(q);

	return rc;
}
EXPORT_SYMBOL_GPL(videobuf_read_start);
/* Locked wrapper around __videobuf_read_stop(). */
void videobuf_read_stop(struct videobuf_queue *q)
{
	videobuf_queue_lock(q);
	__videobuf_read_stop(q);
	videobuf_queue_unlock(q);
}
EXPORT_SYMBOL_GPL(videobuf_read_stop);
1032 void videobuf_stop(struct videobuf_queue *q)
1034 videobuf_queue_lock(q);
1037 __videobuf_streamoff(q);
1040 __videobuf_read_stop(q);
1042 videobuf_queue_unlock(q);
1044 EXPORT_SYMBOL_GPL(videobuf_stop);
1046 ssize_t videobuf_read_stream(struct videobuf_queue *q,
1047 char __user *data, size_t count, loff_t *ppos,
1048 int vbihack, int nonblocking)
1051 unsigned long flags = 0;
1053 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
1055 dprintk(2, "%s\n", __func__);
1056 videobuf_queue_lock(q);
1061 retval = __videobuf_read_start(q);
1068 /* get / wait for data */
1069 if (NULL == q->read_buf) {
1070 q->read_buf = list_entry(q->stream.next,
1071 struct videobuf_buffer,
1073 list_del(&q->read_buf->stream);
1076 rc = videobuf_waiton(q, q->read_buf, nonblocking, 1);
1083 if (q->read_buf->state == VIDEOBUF_DONE) {
1084 rc = __videobuf_copy_stream(q, q->read_buf, data + retval, count,
1085 retval, vbihack, nonblocking);
1095 q->read_off = q->read_buf->size;
1100 /* requeue buffer when done with copying */
1101 if (q->read_off == q->read_buf->size) {
1102 list_add_tail(&q->read_buf->stream,
1104 spin_lock_irqsave(q->irqlock, flags);
1105 q->ops->buf_queue(q, q->read_buf);
1106 spin_unlock_irqrestore(q->irqlock, flags);
1114 videobuf_queue_unlock(q);
1117 EXPORT_SYMBOL_GPL(videobuf_read_stream);
1119 __poll_t videobuf_poll_stream(struct file *file,
1120 struct videobuf_queue *q,
1123 __poll_t req_events = poll_requested_events(wait);
1124 struct videobuf_buffer *buf = NULL;
1127 videobuf_queue_lock(q);
1129 if (!list_empty(&q->stream))
1130 buf = list_entry(q->stream.next,
1131 struct videobuf_buffer, stream);
1132 } else if (req_events & (EPOLLIN | EPOLLRDNORM)) {
1134 __videobuf_read_start(q);
1137 } else if (NULL == q->read_buf) {
1138 q->read_buf = list_entry(q->stream.next,
1139 struct videobuf_buffer,
1141 list_del(&q->read_buf->stream);
1147 poll_wait(file, &buf->done, wait);
1152 if (buf->state == VIDEOBUF_DONE ||
1153 buf->state == VIDEOBUF_ERROR) {
1155 case V4L2_BUF_TYPE_VIDEO_OUTPUT:
1156 case V4L2_BUF_TYPE_VBI_OUTPUT:
1157 case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
1158 case V4L2_BUF_TYPE_SDR_OUTPUT:
1159 rc = EPOLLOUT | EPOLLWRNORM;
1162 rc = EPOLLIN | EPOLLRDNORM;
1167 videobuf_queue_unlock(q);
1170 EXPORT_SYMBOL_GPL(videobuf_poll_stream);
1172 int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
1177 MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);
1179 if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) {
1180 dprintk(1, "mmap appl bug: PROT_WRITE and MAP_SHARED are required\n");
1184 videobuf_queue_lock(q);
1185 for (i = 0; i < VIDEO_MAX_FRAME; i++) {
1186 struct videobuf_buffer *buf = q->bufs[i];
1188 if (buf && buf->memory == V4L2_MEMORY_MMAP &&
1189 buf->boff == (vma->vm_pgoff << PAGE_SHIFT)) {
1190 rc = CALL(q, mmap_mapper, q, buf, vma);
1194 videobuf_queue_unlock(q);
1198 EXPORT_SYMBOL_GPL(videobuf_mmap_mapper);