// SPDX-License-Identifier: GPL-2.0+
/*
 * uvc_queue.c -- USB Video Class driver - Buffers management
 *
 * Copyright (C) 2005-2010
 *	Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 */

#include <linux/atomic.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/usb.h>
#include <linux/videodev2.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>

#include <media/v4l2-common.h>
#include <media/videobuf2-dma-sg.h>
#include <media/videobuf2-vmalloc.h>

#include "uvc.h"
#include "uvc_video.h"

/* ------------------------------------------------------------------------
 * Video buffers queue management.
 *
 * The video queue is initialized by uvcg_queue_init(). The function performs
 * basic initialization of the uvc_video_queue struct and never fails.
 *
 * Video buffers are managed by videobuf2. The driver uses a mutex to protect
 * the videobuf2 queue operations by serializing calls to videobuf2 and a
 * spinlock to protect the IRQ queue that holds the buffers to be processed by
 * the driver.
 */

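/*
 * A minimal usage sketch (assumed caller context, not part of this file):
 * the gadget's V4L2 side typically initializes the queue once at
 * registration time and passes its own serialization mutex, e.g.
 *
 *	ret = uvcg_queue_init(&uvc->video.queue, &cdev->gadget->dev,
 *			      V4L2_BUF_TYPE_VIDEO_OUTPUT, &uvc->video.mutex);
 *	if (ret < 0)
 *		return ret;
 */
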
/* -----------------------------------------------------------------------------
 * videobuf2 queue operations
 */

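/*
 * Clamp the buffer count requested by userspace to the driver limits and
 * report a single plane large enough to hold a full image.
 */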
static int uvc_queue_setup(struct vb2_queue *vq,
			   unsigned int *nbuffers, unsigned int *nplanes,
			   unsigned int sizes[], struct device *alloc_devs[])
{
	struct uvc_video_queue *queue = vb2_get_drv_priv(vq);
	struct uvc_video *video = container_of(queue, struct uvc_video, queue);

	if (*nbuffers > UVC_MAX_VIDEO_BUFFERS)
		*nbuffers = UVC_MAX_VIDEO_BUFFERS;
	if (*nbuffers < UVCG_STREAMING_MIN_BUFFERS)
		*nbuffers = UVCG_STREAMING_MIN_BUFFERS;

	*nplanes = 1;

	sizes[0] = video->imagesize;

	return 0;
}

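/*
 * Validate a buffer before it is queued. OUTPUT buffers (frame data filled
 * by userspace for transmission to the host) record their payload size and
 * spread it over the USB requests carrying the frame, accounting for one
 * UVC payload header per request; CAPTURE buffers start empty.
 */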
static int uvc_buffer_prepare(struct vb2_buffer *vb)
{
	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
	struct uvc_video *video = container_of(queue, struct uvc_video, queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);

	if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
	    vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
		return -EINVAL;
	}

	if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED))
		return -ENODEV;

	buf->state = UVC_BUF_STATE_QUEUED;
	if (queue->use_sg) {
		buf->sgt = vb2_dma_sg_plane_desc(vb, 0);
		buf->sg = buf->sgt->sgl;
	} else {
		buf->mem = vb2_plane_vaddr(vb, 0);
	}
	buf->length = vb2_plane_size(vb, 0);
	if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		buf->bytesused = 0;
	} else {
		buf->bytesused = vb2_get_plane_payload(vb, 0);
		buf->req_payload_size =
			DIV_ROUND_UP(buf->bytesused +
				     (video->reqs_per_frame * UVCG_REQUEST_HEADER_LEN),
				     video->reqs_per_frame);
	}

	return 0;
}

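/*
 * Add a prepared buffer to the IRQ queue under the irqlock, or complete it
 * immediately with an error if the device has been disconnected.
 */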
static void uvc_buffer_queue(struct vb2_buffer *vb)
{
	struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);
	struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf);
	unsigned long flags;

	spin_lock_irqsave(&queue->irqlock, flags);

	if (likely(!(queue->flags & UVC_QUEUE_DISCONNECTED))) {
		list_add_tail(&buf->queue, &queue->irqqueue);
	} else {
		/*
		 * If the device is disconnected return the buffer to userspace
		 * directly. The next QBUF call will fail with -ENODEV.
		 */
		buf->state = UVC_BUF_STATE_ERROR;
		vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
	}

	spin_unlock_irqrestore(&queue->irqlock, flags);
}

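/*
 * Only the buffer-level operations are implemented here; stream start and
 * stop are driven through uvcg_queue_enable() below.
 */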
static const struct vb2_ops uvc_queue_qops = {
	.queue_setup = uvc_queue_setup,
	.buf_prepare = uvc_buffer_prepare,
	.buf_queue = uvc_buffer_queue,
};

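/*
 * Initialize the videobuf2 queue. Scatter-gather memory operations are
 * selected when the UDC supports them, falling back to vmalloc'ed buffers
 * otherwise.
 */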
int uvcg_queue_init(struct uvc_video_queue *queue, struct device *dev, enum v4l2_buf_type type,
		    struct mutex *lock)
{
	struct uvc_video *video = container_of(queue, struct uvc_video, queue);
	struct usb_composite_dev *cdev = video->uvc->func.config->cdev;
	int ret;

	queue->queue.type = type;
	queue->queue.io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
	queue->queue.drv_priv = queue;
	queue->queue.buf_struct_size = sizeof(struct uvc_buffer);
	queue->queue.ops = &uvc_queue_qops;
	queue->queue.lock = lock;
	if (cdev->gadget->sg_supported) {
		queue->queue.mem_ops = &vb2_dma_sg_memops;
		queue->use_sg = 1;
	} else {
		queue->queue.mem_ops = &vb2_vmalloc_memops;
	}

	queue->queue.timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_COPY
				     | V4L2_BUF_FLAG_TSTAMP_SRC_EOF;
	queue->queue.dev = dev;

	ret = vb2_queue_init(&queue->queue);
	if (ret)
		return ret;

	spin_lock_init(&queue->irqlock);
	INIT_LIST_HEAD(&queue->irqqueue);
	queue->flags = 0;

	return 0;
}

/*
 * Free the video buffers.
 */
void uvcg_free_buffers(struct uvc_video_queue *queue)
{
	vb2_queue_release(&queue->queue);
}

/*
 * Allocate the video buffers.
 */
int uvcg_alloc_buffers(struct uvc_video_queue *queue,
		       struct v4l2_requestbuffers *rb)
{
	int ret;

	ret = vb2_reqbufs(&queue->queue, rb);

	return ret ? ret : rb->count;
}

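/*
 * Query the state of a video buffer (VIDIOC_QUERYBUF helper).
 */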
int uvcg_query_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
	return vb2_querybuf(&queue->queue, buf);
}

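/*
 * Queue a video buffer (VIDIOC_QBUF helper).
 */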
int uvcg_queue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf)
{
	return vb2_qbuf(&queue->queue, NULL, buf);
}

/*
 * Dequeue a video buffer. If nonblocking is false, block until a buffer is
 * available.
 */
int uvcg_dequeue_buffer(struct uvc_video_queue *queue, struct v4l2_buffer *buf,
			int nonblocking)
{
	return vb2_dqbuf(&queue->queue, buf, nonblocking);
}

/*
 * Poll the video queue.
 *
 * This function implements video queue polling and is intended to be used by
 * the device poll handler.
 */
__poll_t uvcg_queue_poll(struct uvc_video_queue *queue, struct file *file,
			 poll_table *wait)
{
	return vb2_poll(&queue->queue, file, wait);
}

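/*
 * Map the video buffers to userspace (mmap() helper).
 */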
int uvcg_queue_mmap(struct uvc_video_queue *queue, struct vm_area_struct *vma)
{
	return vb2_mmap(&queue->queue, vma);
}

#ifndef CONFIG_MMU
/*
 * Get unmapped area.
 *
 * NO-MMU archs need this function to make mmap() work correctly.
 */
unsigned long uvcg_queue_get_unmapped_area(struct uvc_video_queue *queue,
					   unsigned long pgoff)
{
	return vb2_get_unmapped_area(&queue->queue, 0, 0, pgoff, 0);
}
#endif

/*
 * Cancel the video buffers queue.
 *
 * Cancelling the queue marks all buffers on the irq queue as erroneous,
 * wakes them up and removes them from the queue.
 *
 * If the disconnect parameter is set, further calls to uvc_queue_buffer will
 * fail with -ENODEV.
 *
 * This function acquires the irq spinlock and can be called from interrupt
 * context.
 */
void uvcg_queue_cancel(struct uvc_video_queue *queue, int disconnect)
{
	struct uvc_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&queue->irqlock, flags);
	while (!list_empty(&queue->irqqueue)) {
		buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
				       queue);
		list_del(&buf->queue);
		buf->state = UVC_BUF_STATE_ERROR;
		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
	}
	queue->buf_used = 0;

	/*
	 * This must be protected by the irqlock spinlock to avoid race
	 * conditions between uvc_queue_buffer and the disconnection event that
	 * could result in an interruptible wait in uvc_dequeue_buffer. Do not
	 * blindly replace this logic by checking for the UVC_DEV_DISCONNECTED
	 * state outside the queue code.
	 */
	if (disconnect)
		queue->flags |= UVC_QUEUE_DISCONNECTED;
	spin_unlock_irqrestore(&queue->irqlock, flags);
}

/*
 * Enable or disable the video buffers queue.
 *
 * The queue must be enabled before starting video acquisition and must be
 * disabled after stopping it. This ensures that the video buffers queue
 * state can be properly initialized before buffers are accessed from the
 * interrupt handler.
 *
 * Enabling the video queue initializes parameters (such as sequence number,
 * sync pattern, ...). If the queue is already enabled, return -EBUSY.
 *
 * Disabling the video queue cancels the queue and removes all buffers from
 * the main queue.
 *
 * This function can't be called from interrupt context. Use
 * uvcg_queue_cancel() instead.
 */
int uvcg_queue_enable(struct uvc_video_queue *queue, int enable)
{
	unsigned long flags;
	int ret = 0;

	if (enable) {
		ret = vb2_streamon(&queue->queue, queue->queue.type);
		if (ret < 0)
			return ret;

		queue->sequence = 0;
		queue->buf_used = 0;
		queue->flags &= ~UVC_QUEUE_DROP_INCOMPLETE;
	} else {
		ret = vb2_streamoff(&queue->queue, queue->queue.type);
		if (ret < 0)
			return ret;

		spin_lock_irqsave(&queue->irqlock, flags);
		INIT_LIST_HEAD(&queue->irqqueue);

		/*
		 * FIXME: We need to clear the DISCONNECTED flag to ensure that
		 * applications will be able to queue buffers for the next
		 * streaming run. However, clearing it here doesn't guarantee
		 * that the device will be reconnected in the meantime.
		 */
		queue->flags &= ~UVC_QUEUE_DISCONNECTED;
		spin_unlock_irqrestore(&queue->irqlock, flags);
	}

	return ret;
}

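/*
 * Complete a buffer after transfer: hand it back to videobuf2 with its
 * final payload size and timestamp, or return it as an error when an
 * incomplete frame must be dropped.
 */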
/* Called with queue->irqlock held. */
void uvcg_complete_buffer(struct uvc_video_queue *queue,
			  struct uvc_buffer *buf)
{
	if (queue->flags & UVC_QUEUE_DROP_INCOMPLETE) {
		queue->flags &= ~UVC_QUEUE_DROP_INCOMPLETE;
		buf->state = UVC_BUF_STATE_ERROR;
		vb2_set_plane_payload(&buf->buf.vb2_buf, 0, 0);
		vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_ERROR);
		return;
	}

	buf->buf.field = V4L2_FIELD_NONE;
	buf->buf.sequence = queue->sequence++;
	buf->buf.vb2_buf.timestamp = ktime_get_ns();

	vb2_set_plane_payload(&buf->buf.vb2_buf, 0, buf->bytesused);
	vb2_buffer_done(&buf->buf.vb2_buf, VB2_BUF_STATE_DONE);
}

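/*
 * Return the buffer at the head of the IRQ queue, or NULL if no buffer is
 * queued.
 */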
struct uvc_buffer *uvcg_queue_head(struct uvc_video_queue *queue)
{
	struct uvc_buffer *buf = NULL;

	if (!list_empty(&queue->irqqueue))
		buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
				       queue);

	return buf;
}