1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
4 * Copyright (C) 2015-2016 Samsung Electronics
5 * Igor Kotrasinski <i.kotrasinsk@samsung.com>
7 * Based on dummy_hcd.c, which is:
8 * Copyright (C) 2003 David Brownell
9 * Copyright (C) 2003-2005 Alan Stern
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation; either version 2 of the License, or
14 * (at your option) any later version.
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
25 #include <linux/usb.h>
26 #include <linux/timer.h>
27 #include <linux/usb/ch9.h>
/*
 * bRequestType masks for decoding standard control requests:
 * standard-type requests addressed to the device, interface or endpoint
 * recipient, plus the IN-direction (device-to-host) variant of each.
 */
#define DEV_REQUEST (USB_TYPE_STANDARD | USB_RECIP_DEVICE)
#define DEV_INREQUEST (DEV_REQUEST | USB_DIR_IN)
#define INTF_REQUEST (USB_TYPE_STANDARD | USB_RECIP_INTERFACE)
#define INTF_INREQUEST (INTF_REQUEST | USB_DIR_IN)
#define EP_REQUEST (USB_TYPE_STANDARD | USB_RECIP_ENDPOINT)
#define EP_INREQUEST (EP_REQUEST | USB_DIR_IN)
/*
 * get_frame_limit() - per-frame byte budget for a given bus speed
 *
 * Returns how many bytes can be moved in one frame at @speed; used by
 * v_timer() to bound how much traffic is simulated per timer tick.
 *
 * NOTE(review): this extract is missing lines here (the switch statement,
 * its case labels and braces, and the default/error return) — only the
 * per-speed return expressions survive below.  Verify against the full
 * source before editing.
 */
static int get_frame_limit(enum usb_device_speed speed)
		/* low speed: 8-byte packets, ~12 packets per frame */
		return 8 /*bytes*/ * 12 /*packets*/;
		/* full speed: 64-byte packets, ~19 packets per frame */
		return 64 /*bytes*/ * 19 /*packets*/;
		/* high speed: 512B * 13 packets per uframe, 8 uframes/frame */
		return 512 /*bytes*/ * 13 /*packets*/ * 8 /*uframes*/;
		/* Bus speed is 500000 bytes/ms, so use a little less */
58 * handle_control_request() - handles all control transfers
59 * @udc: pointer to vudc
60 * @urb: the urb request to handle
61 * @setup: pointer to the setup data for a USB device control
63 * @status: pointer to request handling status
65 * Return 0 - if the request was handled
 * 1 - if the request wasn't handled
69 * Adapted from drivers/usb/gadget/udc/dummy_hcd.c
/*
 * NOTE(review): this extract of the function is incomplete — local
 * variable declarations (w_index, w_value, ret_val, ep2, buf), break
 * statements, the inner switch headers on w_value, several closing
 * braces and the final return are missing from the visible lines.
 * Comments below annotate only what is actually visible; verify
 * everything against the complete source file.
 */
static int handle_control_request(struct vudc *udc, struct urb *urb,
	struct usb_ctrlrequest *setup,
	/* setup packet fields arrive little-endian on the wire */
	w_index = le16_to_cpu(setup->wIndex);
	w_value = le16_to_cpu(setup->wValue);
	switch (setup->bRequest) {
	case USB_REQ_SET_ADDRESS:
		if (setup->bRequestType != DEV_REQUEST)
		/* latch the address assigned by the host */
		udc->address = w_value;
	case USB_REQ_SET_FEATURE:
		if (setup->bRequestType == DEV_REQUEST) {
			/* device-level features; the inner switch on w_value
			 * appears to have been dropped by the extract */
			case USB_DEVICE_REMOTE_WAKEUP:
			case USB_DEVICE_B_HNP_ENABLE:
				udc->gadget.b_hnp_enable = 1;
			case USB_DEVICE_A_HNP_SUPPORT:
				udc->gadget.a_hnp_support = 1;
			case USB_DEVICE_A_ALT_HNP_SUPPORT:
				udc->gadget.a_alt_hnp_support = 1;
				ret_val = -EOPNOTSUPP;
			/* record the feature bit in the device status word */
			udc->devstatus |= (1 << w_value);
		} else if (setup->bRequestType == EP_REQUEST) {
			/* endpoint feature (halt); refuse it on ep0 */
			ep2 = vudc_find_endpoint(udc, w_index);
			if (!ep2 || ep2->ep.name == udc->ep[0].ep.name) {
				ret_val = -EOPNOTSUPP;
	case USB_REQ_CLEAR_FEATURE:
		if (setup->bRequestType == DEV_REQUEST) {
			case USB_DEVICE_REMOTE_WAKEUP:
				w_value = USB_DEVICE_REMOTE_WAKEUP;
			/* USB3 power-management features are not supported */
			case USB_DEVICE_U1_ENABLE:
			case USB_DEVICE_U2_ENABLE:
			case USB_DEVICE_LTM_ENABLE:
				ret_val = -EOPNOTSUPP;
				ret_val = -EOPNOTSUPP;
			/* clear the feature bit from the device status word */
			udc->devstatus &= ~(1 << w_value);
		} else if (setup->bRequestType == EP_REQUEST) {
			ep2 = vudc_find_endpoint(udc, w_index);
				ret_val = -EOPNOTSUPP;
	case USB_REQ_GET_STATUS:
		if (setup->bRequestType == DEV_INREQUEST
			|| setup->bRequestType == INTF_INREQUEST
			|| setup->bRequestType == EP_INREQUEST) {
			 * device: remote wakeup, selfpowered
			buf = (char *)urb->transfer_buffer;
			if (urb->transfer_buffer_length > 0) {
				if (setup->bRequestType == EP_INREQUEST) {
					/* endpoint status: report halt state */
					ep2 = vudc_find_endpoint(udc, w_index);
						ret_val = -EOPNOTSUPP;
					buf[0] = ep2->halted;
				} else if (setup->bRequestType ==
					buf[0] = (u8)udc->devstatus;
				/* a status stage is at most two bytes long */
				if (urb->transfer_buffer_length > 1)
				urb->actual_length = min_t(u32, 2,
					urb->transfer_buffer_length);
195 /* Adapted from dummy_hcd.c ; caller must hold lock */
/*
 * transfer() - move up to @limit bytes between a host urb and the
 * usb_requests queued on endpoint @ep, emulating packet semantics
 * (short-packet termination, zlp handling, overflow reporting).
 *
 * NOTE(review): this extract is incomplete — the function's opening
 * brace, the total/return bookkeeping, several if-bodies, break and
 * continue statements, and closing braces are missing from the visible
 * lines.  Verify against the complete source before editing.
 */
static int transfer(struct vudc *udc,
	struct urb *urb, struct vep *ep, int limit)
	struct vrequest *req;
	/* if there's no request queued, the device is NAKing; return */
	list_for_each_entry(req, &ep->req_queue, req_entry) {
		unsigned int host_len, dev_len, len;
		void *ubuf_pos, *rbuf_pos;
		int is_short, to_host;
		 * 1..N packets of ep->ep.maxpacket each ... the last one
		 * may be short (including zero length).
		 * writer can send a zlp explicitly (length 0) or implicitly
		 * (length mod maxpacket zero, and 'zero' flag); they always
		/* bytes each side still expects; copy the smaller amount */
		host_len = urb->transfer_buffer_length - urb->actual_length;
		dev_len = req->req.length - req->req.actual;
		len = min(host_len, dev_len);
		to_host = usb_pipein(urb->pipe);
		if (unlikely(len == 0))
		/* send multiple of maxpacket first, then remainder */
		if (len >= ep->ep.maxpacket) {
			if (len % ep->ep.maxpacket > 0)
			len -= len % ep->ep.maxpacket;
		/* current copy positions inside the urb and request buffers */
		ubuf_pos = urb->transfer_buffer + urb->actual_length;
		rbuf_pos = req->req.buf + req->req.actual;
		if (urb->pipe & USB_DIR_IN)
			memcpy(ubuf_pos, rbuf_pos, len); /* device -> host */
			memcpy(rbuf_pos, ubuf_pos, len); /* host -> device */
		urb->actual_length += len;
		req->req.actual += len;
		 * short packets terminate, maybe with overflow/underflow.
		 * it's only really an error to write too much.
		 * partially filling a buffer optionally blocks queue advances
		 * (so completion handlers can clean up the queue) but we don't
		 * need to emulate such data-in-flight.
		if (host_len == dev_len) {
		} else if (to_host) {
			if (dev_len > host_len)
				urb->status = -EOVERFLOW;
			if (host_len > dev_len)
				req->req.status = -EOVERFLOW;
		/* many requests terminate without a short packet */
		/* also check if we need to send zlp */
		if (req->req.length == req->req.actual) {
			if (req->req.zero && to_host)
		if (urb->transfer_buffer_length == urb->actual_length) {
			if (urb->transfer_flags & URB_ZERO_PACKET &&
		/* device side completion --> continuable */
		if (req->req.status != -EINPROGRESS) {
			list_del_init(&req->req_entry);
			/* drop the lock around the gadget callback, which
			 * may queue new requests on this endpoint */
			spin_unlock(&udc->lock);
			usb_gadget_giveback_request(&ep->ep, &req->req);
			spin_lock(&udc->lock);
			/* requests might have been unlinked... */
		/* host side completion --> terminate */
		if (urb->status != -EINPROGRESS)
		/* rescan to continue with any other queued i/o */
/*
 * v_timer() - per-frame transfer simulation tick
 *
 * Walks udc->urb_queue under udc->lock, runs control/bulk/int/isoc
 * handling for each pending urb within the frame's byte budget
 * (get_frame_limit()), and hands completed urbs to the tx side under
 * udc->lock_tx.  Re-arms itself one frame (1 ms) later while urbs
 * remain queued.
 *
 * NOTE(review): this extract is incomplete — the declarations of
 * flags/ret/total/limit/_ep/ep, the urb-type switch header, many
 * break/continue/goto statements and closing braces are missing from
 * the visible lines.  Verify against the complete source before editing.
 */
static void v_timer(struct timer_list *t)
	struct vudc *udc = from_timer(udc, t, tr_timer.timer);
	struct transfer_timer *timer = &udc->tr_timer;
	struct urbp *urb_p, *tmp;
	spin_lock_irqsave(&udc->lock, flags);
	total = get_frame_limit(udc->gadget.speed);
	if (total < 0) { /* unknown speed, or not set yet */
		timer->state = VUDC_TR_IDLE;
		spin_unlock_irqrestore(&udc->lock, flags);
	/* is it next frame now? */
	if (time_after(jiffies, timer->frame_start + msecs_to_jiffies(1))) {
		timer->frame_limit = total;
		/* FIXME: how to make it accurate? */
		timer->frame_start = jiffies;
		/* still within the current frame: keep its remaining budget */
		total = timer->frame_limit;
	/* We have to clear ep0 flags separately as it's not on the list */
	udc->ep[0].already_seen = 0;
	list_for_each_entry(_ep, &udc->gadget.ep_list, ep_list) {
		ep->already_seen = 0;
	list_for_each_entry_safe(urb_p, tmp, &udc->urb_queue, urb_entry) {
		struct urb *urb = urb_p->urb;
		if (timer->state != VUDC_TR_RUNNING)
			urb->status = -EPROTO;
		/* Used up bandwidth? */
		if (total <= 0 && ep->type == USB_ENDPOINT_XFER_BULK)
		/* serve each endpoint at most once per tick */
		if (ep->already_seen)
		ep->already_seen = 1;
		if (ep == &udc->ep[0] && urb_p->new) {
		if (ep->halted && !ep->setup_stage) {
			urb->status = -EPIPE;
		if (ep == &udc->ep[0] && ep->setup_stage) {
			/* TODO - flush any stale requests */
			ret = handle_control_request(udc, urb,
				(struct usb_ctrlrequest *) urb->setup_packet,
				/* unhandled request: forward the setup packet
				 * to the gadget driver without holding lock */
				spin_unlock(&udc->lock);
				ret = udc->driver->setup(&udc->gadget,
					(struct usb_ctrlrequest *)
				spin_lock(&udc->lock);
				/* no delays (max 64kb data stage) */
				goto treat_control_like_bulk;
				/* gadget driver refused the request */
				urb->status = -EPIPE;
				urb->actual_length = 0;
		case USB_ENDPOINT_XFER_ISOC:
			/* isoc transfers are not simulated */
			urb->status = -EXDEV;
		case USB_ENDPOINT_XFER_INT:
			 * TODO: figure out bandwidth guarantees
			 * for now, give unlimited bandwidth
			limit += urb->transfer_buffer_length;
treat_control_like_bulk:
		total -= transfer(udc, urb, ep, limit);
		if (urb->status == -EINPROGRESS)
		/* urb finished: reset ep state and hand it to the tx side */
		ep->already_seen = ep->setup_stage = 0;
		spin_lock(&udc->lock_tx);
		list_del(&urb_p->urb_entry);
		if (!urb->unlinked) {
			v_enqueue_ret_submit(udc, urb_p);
			v_enqueue_ret_unlink(udc, urb_p->seqnum,
			free_urbp_and_urb(urb_p);
		wake_up(&udc->tx_waitq);
		spin_unlock(&udc->lock_tx);
	/* TODO - also wait on empty usb_request queues? */
	if (list_empty(&udc->urb_queue))
		timer->state = VUDC_TR_IDLE;
		/* re-arm for the next 1 ms frame */
		mod_timer(&timer->timer,
			timer->frame_start + msecs_to_jiffies(1));
	spin_unlock_irqrestore(&udc->lock, flags);
457 /* All timer functions are run with udc->lock held */
/*
 * v_init_timer() - one-time setup of the transfer timer; the timer
 * starts in the STOPPED state and fires v_timer() once kicked.
 * NOTE(review): braces appear to be missing from this extract.
 */
void v_init_timer(struct vudc *udc)
	struct transfer_timer *t = &udc->tr_timer;
	timer_setup(&t->timer, v_timer, 0);
	t->state = VUDC_TR_STOPPED;
/*
 * v_start_timer() - (re)start the transfer timer, refreshing the frame
 * bookkeeping when coming out of the STOPPED state.
 * NOTE(review): the switch header on the timer state and closing
 * braces appear to be missing from this extract — the case labels
 * below presumably belong to a switch on t->state; verify.
 */
void v_start_timer(struct vudc *udc)
	struct transfer_timer *t = &udc->tr_timer;
	dev_dbg(&udc->pdev->dev, "timer start");
	case VUDC_TR_RUNNING:
		return v_kick_timer(udc, jiffies);
	case VUDC_TR_STOPPED:
		/* begin a fresh frame with a full byte budget */
		t->state = VUDC_TR_IDLE;
		t->frame_start = jiffies;
		t->frame_limit = get_frame_limit(udc->gadget.speed);
		return v_kick_timer(udc, jiffies);
/*
 * v_kick_timer() - schedule the transfer timer to fire at @time.
 * NOTE(review): the switch header on the timer state and closing
 * braces appear to be missing from this extract — the case labels
 * below presumably belong to a switch on t->state; verify.
 */
void v_kick_timer(struct vudc *udc, unsigned long time)
	struct transfer_timer *t = &udc->tr_timer;
	dev_dbg(&udc->pdev->dev, "timer kick");
	case VUDC_TR_RUNNING:
		t->state = VUDC_TR_RUNNING;
	case VUDC_TR_STOPPED:
		/* we may want to kick timer to unqueue urbs */
		mod_timer(&t->timer, time);
/*
 * v_stop_timer() - request the transfer timer to stop; v_timer() sees
 * the STOPPED state on its next tick and winds itself down.
 * NOTE(review): braces appear to be missing from this extract.
 */
void v_stop_timer(struct vudc *udc)
	struct transfer_timer *t = &udc->tr_timer;
	/* timer itself will take care of stopping */
	dev_dbg(&udc->pdev->dev, "timer stop");
	t->state = VUDC_TR_STOPPED;