1 // SPDX-License-Identifier: GPL-2.0+
3 * Copyright (C) 2015 Karol Kosik <karo9@interia.eu>
4 * Copyright (C) 2015-2016 Samsung Electronics
5 * Igor Kotrasinski <i.kotrasinsk@samsung.com>
9 #include <linux/list.h>
10 #include <linux/kthread.h>
12 #include "usbip_common.h"
/*
 * alloc_urb_from_cmd - build a URB from a received USBIP CMD_SUBMIT PDU.
 * @urbp: out-parameter receiving the newly allocated URB
 * @pdu:  the decoded CMD_SUBMIT header from the peer
 * @type: endpoint transfer type (USB_ENDPOINT_XFER_*)
 *
 * Allocates the URB (with iso-frame descriptors for isochronous
 * endpoints), unpacks the PDU fields into it, and allocates the
 * transfer buffer and (for control transfers) the 8-byte setup packet.
 * NOTE(review): this is an excerpt — the error/cleanup tail and return
 * statements are not visible here; ownership of the URB on failure
 * should be confirmed against the full source.
 */
15 static int alloc_urb_from_cmd(struct urb **urbp,
16 struct usbip_header *pdu, u8 type)
/* Isoc URBs need one iso_frame_desc per packet announced in the PDU. */
20 if (type == USB_ENDPOINT_XFER_ISOC)
21 urb = usb_alloc_urb(pdu->u.cmd_submit.number_of_packets,
24 urb = usb_alloc_urb(0, GFP_KERNEL);
/* Copy transfer_flags, buffer length, interval, etc. from the PDU. */
29 usbip_pack_pdu(pdu, urb, USBIP_CMD_SUBMIT, 0);
/* Zero-length transfers carry no data stage — skip the allocation. */
31 if (urb->transfer_buffer_length > 0) {
32 urb->transfer_buffer = kzalloc(urb->transfer_buffer_length,
34 if (!urb->transfer_buffer)
/*
 * Duplicate the 8-byte SETUP packet from the PDU; presumably only
 * reached for control endpoints (the guarding check is outside this
 * excerpt — confirm against the full source).
 */
38 urb->setup_packet = kmemdup(&pdu->u.cmd_submit.setup, 8,
40 if (!urb->setup_packet)
44 * FIXME - we only setup pipe enough for usbip functions
/* Encode only the direction bit; the rest of the pipe is set later. */
47 urb->pipe |= pdu->base.direction == USBIP_DIR_IN ?
48 USB_DIR_IN : USB_DIR_OUT;
/* Error path: release the data buffer; NULL it to avoid double free. */
54 kfree(urb->transfer_buffer);
55 urb->transfer_buffer = NULL;
/*
 * v_recv_cmd_unlink - handle a USBIP CMD_UNLINK request.
 * @udc: the virtual UDC instance
 * @pdu: the decoded CMD_UNLINK header
 *
 * Searches the pending urb_queue for the submission named by
 * cmd_unlink.seqnum. If found, the URB is marked unlinked with
 * -ECONNRESET and its seqnum is rewritten to the UNLINK request's own
 * seqnum so the tx side answers the unlink; otherwise a RET_UNLINK
 * with status 0 is queued directly (the URB already completed or was
 * never queued). Some control-flow lines (loop continue/returns) are
 * outside this excerpt.
 */
62 static int v_recv_cmd_unlink(struct vudc *udc,
63 struct usbip_header *pdu)
/* udc->lock protects urb_queue; irqs saved since timers also take it. */
68 spin_lock_irqsave(&udc->lock, flags);
69 list_for_each_entry(urb_p, &udc->urb_queue, urb_entry) {
70 if (urb_p->seqnum != pdu->u.cmd_unlink.seqnum)
/* Found the victim: mark it unlinked as an in-progress cancel. */
72 urb_p->urb->unlinked = -ECONNRESET;
/* Reuse seqnum so the eventual RET carries the UNLINK's seqnum. */
73 urb_p->seqnum = pdu->base.seqnum;
74 v_kick_timer(udc, jiffies);
75 spin_unlock_irqrestore(&udc->lock, flags);
78 /* Not found, completed / not queued */
/* lock_tx nests inside udc->lock here; irqs are already disabled. */
79 spin_lock(&udc->lock_tx);
80 v_enqueue_ret_unlink(udc, pdu->base.seqnum, 0);
81 wake_up(&udc->tx_waitq);
82 spin_unlock(&udc->lock_tx);
83 spin_unlock_irqrestore(&udc->lock, flags);
/*
 * v_recv_cmd_submit - handle a USBIP CMD_SUBMIT request.
 * @udc: the virtual UDC instance
 * @pdu: the decoded CMD_SUBMIT header
 *
 * Resolves the target endpoint, builds a URB from the PDU, receives
 * the data stage (and iso descriptors) from the socket, and queues the
 * wrapped URB on udc->urb_queue for the timer/tx machinery to run.
 * NOTE(review): excerpt — the urb_p allocation, goto cleanup labels,
 * switch breaks and return statements are not visible here.
 */
88 static int v_recv_cmd_submit(struct vudc *udc,
89 struct usbip_header *pdu)
/* urb_p wrapper allocation failed (allocation line outside excerpt). */
98 usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
102 /* base.ep is pipeendpoint(pipe) */
103 address = pdu->base.ep;
104 if (pdu->base.direction == USBIP_DIR_IN)
105 address |= USB_DIR_IN;
107 spin_lock_irq(&udc->lock);
108 urb_p->ep = vudc_find_endpoint(udc, address);
110 /* we don't know the type, there may be isoc data! */
111 dev_err(&udc->pdev->dev, "request to nonexistent endpoint");
112 spin_unlock_irq(&udc->lock);
/* Can't recover the stream position without the ep type: drop TCP. */
113 usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_TCP);
/* Snapshot the endpoint type under the lock; used after unlock. */
117 urb_p->type = urb_p->ep->type;
118 spin_unlock_irq(&udc->lock);
121 urb_p->seqnum = pdu->base.seqnum;
123 ret = alloc_urb_from_cmd(&urb_p->urb, pdu, urb_p->ep->type);
125 usbip_event_add(&udc->ud, VUDC_EVENT_ERROR_MALLOC);
130 urb_p->urb->status = -EINPROGRESS;
132 /* FIXME: more pipe setup to please usbip_common */
/* Clear then set the 2-bit pipe-type field (bits 31:30) of urb->pipe. */
133 urb_p->urb->pipe &= ~(3 << 30);
134 switch (urb_p->ep->type) {
135 case USB_ENDPOINT_XFER_BULK:
136 urb_p->urb->pipe |= (PIPE_BULK << 30);
138 case USB_ENDPOINT_XFER_INT:
139 urb_p->urb->pipe |= (PIPE_INTERRUPT << 30);
141 case USB_ENDPOINT_XFER_CONTROL:
142 urb_p->urb->pipe |= (PIPE_CONTROL << 30);
144 case USB_ENDPOINT_XFER_ISOC:
145 urb_p->urb->pipe |= (PIPE_ISOCHRONOUS << 30);
/* Pull the OUT data stage (if any) from the socket into the URB. */
148 ret = usbip_recv_xbuff(&udc->ud, urb_p->urb);
/* Isochronous transfers additionally carry per-packet descriptors. */
152 ret = usbip_recv_iso(&udc->ud, urb_p->urb);
/* Hand the URB to the device emulation: queue it and arm the timer. */
156 spin_lock_irqsave(&udc->lock, flags);
157 v_kick_timer(udc, jiffies);
158 list_add_tail(&urb_p->urb_entry, &udc->urb_queue);
159 spin_unlock_irqrestore(&udc->lock, flags);
/* Error path: free both the wrapper and the URB it owns. */
164 free_urbp_and_urb(urb_p);
/*
 * v_rx_pdu - receive and dispatch one USBIP PDU from the peer.
 * @ud: the shared usbip_device embedded in the vudc
 *
 * Reads exactly one usbip_header from the TCP socket, converts it to
 * host byte order, verifies the device is still attached
 * (SDEV_ST_USED), and dispatches to the UNLINK/SUBMIT handlers.
 * NOTE(review): excerpt — return statements and switch breaks are
 * outside this view.
 */
168 static int v_rx_pdu(struct usbip_device *ud)
171 struct usbip_header pdu;
172 struct vudc *udc = container_of(ud, struct vudc, ud);
174 memset(&pdu, 0, sizeof(pdu));
/* Short read or socket error means the stream is broken — signal TCP error. */
175 ret = usbip_recv(ud->tcp_socket, &pdu, sizeof(pdu));
176 if (ret != sizeof(pdu)) {
177 usbip_event_add(ud, VUDC_EVENT_ERROR_TCP);
/* Wire format is big-endian; 0 = convert to CPU order on receive. */
182 usbip_header_correct_endian(&pdu, 0);
/* Only process PDUs while the device is in the attached state. */
184 spin_lock_irq(&ud->lock);
185 ret = (ud->status == SDEV_ST_USED);
186 spin_unlock_irq(&ud->lock);
188 usbip_event_add(ud, VUDC_EVENT_ERROR_TCP);
192 switch (pdu.base.command) {
193 case USBIP_CMD_UNLINK:
194 ret = v_recv_cmd_unlink(udc, &pdu);
196 case USBIP_CMD_SUBMIT:
197 ret = v_recv_cmd_submit(udc, &pdu);
/* Unknown command: protocol violation from the peer. */
201 pr_err("rx: unknown command");
/*
 * v_rx_loop - kthread main loop for the vudc receive side.
 * @data: the usbip_device pointer passed at kthread creation
 *
 * Repeatedly receives PDUs until the thread is asked to stop or a
 * usbip event (error/shutdown) is pending. The per-iteration call to
 * v_rx_pdu() and the loop exit logic fall outside this excerpt.
 */
207 int v_rx_loop(void *data)
209 struct usbip_device *ud = data;
212 while (!kthread_should_stop()) {
/* Stop pumping the socket once an error/unbind event is queued. */
213 if (usbip_event_happened(ud))
217 pr_warn("v_rx exit with error %d", ret);