* Request handling
*/
+/*
+ * Callers must take care to hold req_lock when this function may be called
+ * from multiple threads. For example, when frames are streaming to the host.
+ */
static void
uvc_video_free_request(struct uvc_request *ureq, struct usb_ep *ep)
{
...
}

static void
uvc_video_complete(struct usb_request *req, struct usb_ep *ep)
{
struct uvc_request *ureq = req->context;
struct uvc_video *video = ureq->video;
struct uvc_video_queue *queue = &video->queue;
- struct uvc_device *uvc = video->uvc;
+ struct uvc_buffer *last_buf;
unsigned long flags;
+ spin_lock_irqsave(&video->req_lock, flags);
+ if (!video->is_enabled) {
+ /*
+ * When is_enabled is false, uvcg_video_disable() ensures
+ * that in-flight uvc_buffers are returned, so we can
+ * safely call free_request without worrying about
+ * last_buf.
+ */
+ uvc_video_free_request(ureq, ep);
+ spin_unlock_irqrestore(&video->req_lock, flags);
+ return;
+ }
+
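+ /*
+ * Detach last_buf while still holding req_lock so that either this
+ * completion handler or uvcg_video_disable() completes the buffer,
+ * but never both.
+ */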
+ last_buf = ureq->last_buf;
+ ureq->last_buf = NULL;
+ spin_unlock_irqrestore(&video->req_lock, flags);
+
switch (req->status) {
case 0:
break;
...
default:
uvcg_queue_cancel(queue, 0);
}
- if (ureq->last_buf) {
- uvcg_complete_buffer(&video->queue, ureq->last_buf);
- ureq->last_buf = NULL;
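+ /*
+ * Complete the detached buffer under queue->irqlock, matching the
+ * way uvcg_video_disable() returns in-flight buffers.
+ */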
+ if (last_buf) {
+ spin_lock_irqsave(&queue->irqlock, flags);
+ uvcg_complete_buffer(queue, last_buf);
+ spin_unlock_irqrestore(&queue->irqlock, flags);
}
spin_lock_irqsave(&video->req_lock, flags);
- list_add_tail(&req->list, &video->req_free);
- spin_unlock_irqrestore(&video->req_lock, flags);
-
- if (uvc->state == UVC_STATE_STREAMING)
+ /*
+ * The video stream may have been disabled while this usb_request
+ * was being processed, so check that streaming is still enabled
+ * before returning the usb_request to req_free.
+ */
+ if (video->is_enabled) {
+ list_add_tail(&req->list, &video->req_free);
queue_work(video->async_wq, &video->pump);
+ } else {
+ uvc_video_free_request(ureq, ep);
+ }
+ spin_unlock_irqrestore(&video->req_lock, flags);
}
static void
uvcg_video_pump(struct work_struct *work)
{
struct uvc_video *video = container_of(work, struct uvc_video, pump);
struct uvc_video_queue *queue = &video->queue;
/* video->max_payload_size is only set when using bulk transfer */
bool is_bulk = video->max_payload_size;
- struct uvc_device *uvc = video->uvc;
struct usb_request *req = NULL;
struct uvc_buffer *buf;
unsigned long flags;
bool buf_done;
int ret;
- while (uvc->state == UVC_STATE_STREAMING && video->ep->enabled) {
+ while (true) {
+ if (!video->ep->enabled)
+ return;
+
/*
- * Retrieve the first available USB request, protected by the
- * request lock.
+ * Check is_enabled and retrieve the first available USB
+ * request, protected by the request lock.
*/
spin_lock_irqsave(&video->req_lock, flags);
- if (list_empty(&video->req_free)) {
+ if (!video->is_enabled || list_empty(&video->req_free)) {
spin_unlock_irqrestore(&video->req_lock, flags);
return;
}
if (!req)
return;
spin_lock_irqsave(&video->req_lock, flags);
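+ /*
+ * As in the completion handler, only return the unused request to
+ * req_free if the stream is still enabled; otherwise free it.
+ */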
- list_add_tail(&req->list, &video->req_free);
+ if (video->is_enabled)
+ list_add_tail(&req->list, &video->req_free);
+ else
+ uvc_video_free_request(req->context, video->ep);
spin_unlock_irqrestore(&video->req_lock, flags);
- return;
}
int
uvcg_video_disable(struct uvc_video *video)
{
- struct uvc_request *ureq;
+ unsigned long flags;
+ struct list_head inflight_bufs;
+ struct usb_request *req, *temp;
+ struct uvc_buffer *buf, *btemp;
+ struct uvc_request *ureq, *utemp;
if (video->ep == NULL) {
uvcg_info(&video->uvc->func,
return -ENODEV;
}
+ INIT_LIST_HEAD(&inflight_bufs);
+ spin_lock_irqsave(&video->req_lock, flags);
+ video->is_enabled = false;
+
+ /*
+ * Remove any in-flight buffers from the uvc_requests
+ * because we want to return them before cancelling the
+ * queue. This ensures that we aren't stuck waiting for
+ * all complete callbacks to come through before disabling
+ * vb2 queue.
+ */
+ list_for_each_entry(ureq, &video->ureqs, list) {
+ if (ureq->last_buf) {
+ list_add_tail(&ureq->last_buf->queue, &inflight_bufs);
+ ureq->last_buf = NULL;
+ }
+ }
+ spin_unlock_irqrestore(&video->req_lock, flags);
+
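+ /*
+ * From this point on the completion handler sees is_enabled == false
+ * and frees its uvc_request instead of touching req_free or last_buf.
+ */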
cancel_work_sync(&video->pump);
uvcg_queue_cancel(&video->queue, 0);
- list_for_each_entry(ureq, &video->ureqs, list) {
- if (ureq->req)
- usb_ep_dequeue(video->ep, ureq->req);
+ spin_lock_irqsave(&video->req_lock, flags);
+ /*
+ * Remove all uvc_requests from ureqs with list_del_init().
+ * This lets uvc_video_free_request() correctly identify whether
+ * the uvc_request is attached to a list when freeing memory.
+ */
+ list_for_each_entry_safe(ureq, utemp, &video->ureqs, list)
+ list_del_init(&ureq->list);
+
+ list_for_each_entry_safe(req, temp, &video->req_free, list) {
+ list_del(&req->list);
+ uvc_video_free_request(req->context, video->ep);
}
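+ /*
+ * Requests still in flight are not on req_free; the completion
+ * handler frees them once it observes is_enabled == false.
+ */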
- uvc_video_free_requests(video);
+ INIT_LIST_HEAD(&video->ureqs);
+ INIT_LIST_HEAD(&video->req_free);
+ video->req_size = 0;
+ spin_unlock_irqrestore(&video->req_lock, flags);
+
+ /*
+ * Return all the video buffers before disabling the queue.
+ */
+ spin_lock_irqsave(&video->queue.irqlock, flags);
+ list_for_each_entry_safe(buf, btemp, &inflight_bufs, queue) {
+ list_del(&buf->queue);
+ uvcg_complete_buffer(&video->queue, buf);
+ }
+ spin_unlock_irqrestore(&video->queue.irqlock, flags);
+
uvcg_queue_enable(&video->queue, 0);
return 0;
}
return -ENODEV;
}
+ /*
+ * Safe to access request related fields without req_lock because
+ * this is the only thread currently active, and no other
+ * request handling thread will become active until this function
+ * returns.
+ */
+ video->is_enabled = true;
+
if ((ret = uvcg_queue_enable(&video->queue, 1)) < 0)
return ret;
int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
{
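+ /*
+ * Streaming starts disabled; uvcg_video_enable() sets is_enabled and
+ * uvcg_video_disable() clears it under req_lock.
+ */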
+ video->is_enabled = false;
INIT_LIST_HEAD(&video->ureqs);
INIT_LIST_HEAD(&video->req_free);
spin_lock_init(&video->req_lock);