/**
 * gadget.c - DesignWare USB3 DRD Controller Gadget Framework Link
 *
 * Copyright (C) 2010-2011 Texas Instruments Incorporated - http://www.ti.com
 *
 * Authors: Felipe Balbi <balbi@ti.com>,
 *	    Sebastian Andrzej Siewior <bigeasy@linutronix.de>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 of
 * the License as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>

#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

#include "debug.h"
#include "core.h"
#include "gadget.h"
#include "io.h"

/**
 * dwc3_gadget_set_test_mode - Enables USB2 Test Modes
 * @dwc: pointer to our context structure
 * @mode: the mode to set (J, K, SE0 NAK, Packet, Force Enable)
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -EINVAL if wrong Test Selector
 * is passed
 */
int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_TSTCTRL_MASK;

	switch (mode) {
	case TEST_J:
	case TEST_K:
	case TEST_SE0_NAK:
	case TEST_PACKET:
	case TEST_FORCE_EN:
		reg |= mode << 1;
		break;
	default:
		return -EINVAL;
	}

	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	return 0;
}
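
/*
 * Illustrative usage sketch (not part of this file): the SET_FEATURE
 * (TEST_MODE) handling in ep0.c is expected to extract the test selector
 * from the upper byte of wIndex and pass it here, roughly:
 *
 *	selector = le16_to_cpu(ctrl->wIndex) >> 8;
 *	ret = dwc3_gadget_set_test_mode(dwc, selector);
 *	if (ret < 0)
 *		return ret;	// stall ep0
 *
 * The selector values (TEST_J, TEST_K, ...) come from <linux/usb/ch9.h>.
 */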

/**
 * dwc3_gadget_get_link_state - Gets current state of USB Link
 * @dwc: pointer to our context structure
 *
 * Caller should take care of locking. This function will
 * return the link state on success (>= 0) or -ETIMEDOUT.
 */
int dwc3_gadget_get_link_state(struct dwc3 *dwc)
{
	u32 reg;

	reg = dwc3_readl(dwc->regs, DWC3_DSTS);

	return DWC3_DSTS_USBLNKST(reg);
}

/**
 * dwc3_gadget_set_link_state - Sets USB Link to a particular State
 * @dwc: pointer to our context structure
 * @state: the state to put link into
 *
 * Caller should take care of locking. This function will
 * return 0 on success or -ETIMEDOUT.
 */
int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state)
{
	int retries = 10000;
	u32 reg;

	/*
	 * Wait until device controller is ready. Only applies to 1.94a and
	 * later RTL.
	 */
	if (dwc->revision >= DWC3_REVISION_194A) {
		while (--retries) {
			reg = dwc3_readl(dwc->regs, DWC3_DSTS);
			if (reg & DWC3_DSTS_DCNRD)
				udelay(5);
			else
				break;
		}

		if (retries <= 0)
			return -ETIMEDOUT;
	}

	reg = dwc3_readl(dwc->regs, DWC3_DCTL);
	reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;

	/* set requested state */
	reg |= DWC3_DCTL_ULSTCHNGREQ(state);
	dwc3_writel(dwc->regs, DWC3_DCTL, reg);

	/*
	 * The following code is racy when called from dwc3_gadget_wakeup,
	 * and is not needed, at least on newer versions
	 */
	if (dwc->revision >= DWC3_REVISION_194A)
		return 0;

	/* wait for a change in DSTS */
	retries = 10000;
	while (--retries) {
		reg = dwc3_readl(dwc->regs, DWC3_DSTS);

		if (DWC3_DSTS_USBLNKST(reg) == state)
			return 0;

		udelay(5);
	}

	dev_vdbg(dwc->dev, "link state change request timed out\n");

	return -ETIMEDOUT;
}

/**
 * dwc3_gadget_resize_tx_fifos - reallocate fifo spaces for current use-case
 * @dwc: pointer to our context structure
 *
 * This function will do a best effort FIFO allocation in order
 * to improve FIFO usage and throughput, while still allowing
 * us to enable as many endpoints as possible.
 *
 * Keep in mind that this operation will be highly dependent
 * on the configured size for RAM1 - which contains TxFifo -,
 * the amount of endpoints enabled on coreConsultant tool, and
 * the width of the Master Bus.
 *
 * In the ideal world, we would always be able to satisfy the
 * following equation:
 *
 * ((512 + 2 * MDWIDTH-Bytes) + (Number of IN Endpoints - 1) * \
 * (3 * (1024 + MDWIDTH-Bytes) + MDWIDTH-Bytes)) / MDWIDTH-Bytes
 *
 * Unfortunately, due to many variables that's not always the case.
 */
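/*
 * Worked example for the equation above (illustrative numbers only, not
 * taken from any particular coreConsultant configuration): with a 64-bit
 * master bus, MDWIDTH-Bytes = 8. ep0 needs 512 + 2 * 8 = 528 bytes, and
 * each additional IN endpoint sized for 3 full 1024-byte packets needs
 * 3 * (1024 + 8) + 8 = 3104 bytes. With 4 IN endpoints the ideal RAM1
 * depth is therefore (528 + 3 * 3104) / 8 = 1230 MDWIDTH words.
 */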
int dwc3_gadget_resize_tx_fifos(struct dwc3 *dwc)
{
	int last_fifo_depth = 0;
	int ram1_depth;
	int fifo_size;
	int mdwidth;
	int num;

	if (!dwc->needs_fifo_resize)
		return 0;

	ram1_depth = DWC3_RAM1_DEPTH(dwc->hwparams.hwparams7);
	mdwidth = DWC3_MDWIDTH(dwc->hwparams.hwparams0);

	/* MDWIDTH is represented in bits, we need it in bytes */
	mdwidth >>= 3;

	/*
	 * FIXME For now we will only allocate 1 wMaxPacketSize space
	 * for each enabled endpoint, later patches will come to
	 * improve this algorithm so that we better use the internal
	 * FIFO space
	 */
	for (num = 0; num < dwc->num_in_eps; num++) {
		/* bit0 indicates direction; 1 means IN ep */
		struct dwc3_ep *dep = dwc->eps[(num << 1) | 1];
		int mult = 1;
		int tmp;

		if (!(dep->flags & DWC3_EP_ENABLED))
			continue;

		if (usb_endpoint_xfer_bulk(dep->endpoint.desc)
				|| usb_endpoint_xfer_isoc(dep->endpoint.desc))
			mult = 3;

		/*
		 * REVISIT: the following assumes we will always have enough
		 * space available on the FIFO RAM for all possible use cases.
		 * Make sure that's true somehow and change FIFO allocation
		 * accordingly.
		 *
		 * If we have Bulk or Isochronous endpoints, we want
		 * them to be able to be very, very fast. So we're giving
		 * those endpoints a fifo_size which is enough for 3 full
		 * packets
		 */
		tmp = mult * (dep->endpoint.maxpacket + mdwidth);
		tmp += mdwidth;

		fifo_size = DIV_ROUND_UP(tmp, mdwidth);

		fifo_size |= (last_fifo_depth << 16);

		dev_vdbg(dwc->dev, "%s: Fifo Addr %04x Size %d\n",
				dep->name, last_fifo_depth, fifo_size & 0xffff);

		dwc3_writel(dwc->regs, DWC3_GTXFIFOSIZ(num), fifo_size);

		last_fifo_depth += (fifo_size & 0xffff);
	}

	return 0;
}

void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req,
		int status)
{
	struct dwc3 *dwc = dep->dwc;
	int i;

	if (req->queued) {
		i = 0;
		do {
			dep->busy_slot++;
			/*
			 * Skip LINK TRB. We can't use req->trb and check for
			 * DWC3_TRBCTL_LINK_TRB because it points to the TRB we
			 * just completed (not the LINK TRB).
			 */
			if (((dep->busy_slot & DWC3_TRB_MASK) ==
					DWC3_TRB_NUM - 1) &&
					usb_endpoint_xfer_isoc(dep->endpoint.desc))
				dep->busy_slot++;
		} while (++i < req->request.num_mapped_sgs);
		req->queued = false;
	}
	list_del(&req->list);
	req->trb = NULL;

	if (req->request.status == -EINPROGRESS)
		req->request.status = status;

	if (dwc->ep0_bounced && dep->number == 0)
		dwc->ep0_bounced = false;
	else
		usb_gadget_unmap_request(&dwc->gadget, &req->request,
				req->direction);

	dev_dbg(dwc->dev, "request %p from %s completed %d/%d ===> %d\n",
			req, dep->name, req->request.actual,
			req->request.length, status);
	trace_dwc3_gadget_giveback(req);

	spin_unlock(&dwc->lock);
	usb_gadget_giveback_request(&dep->endpoint, &req->request);
	spin_lock(&dwc->lock);
}

int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param)
{
	u32 timeout = 500;
	u32 reg;

	trace_dwc3_gadget_generic_cmd(cmd, param);

	dwc3_writel(dwc->regs, DWC3_DGCMDPAR, param);
	dwc3_writel(dwc->regs, DWC3_DGCMD, cmd | DWC3_DGCMD_CMDACT);

	do {
		reg = dwc3_readl(dwc->regs, DWC3_DGCMD);
		if (!(reg & DWC3_DGCMD_CMDACT)) {
			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
					DWC3_DGCMD_STATUS(reg));
			return 0;
		}

		/*
		 * We can't sleep here, because it's also called from
		 * interrupt context.
		 */
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;
		udelay(1);
	} while (1);
}

int dwc3_send_gadget_ep_cmd(struct dwc3 *dwc, unsigned ep,
		unsigned cmd, struct dwc3_gadget_ep_cmd_params *params)
{
	struct dwc3_ep *dep = dwc->eps[ep];
	u32 timeout = 500;
	u32 reg;

	trace_dwc3_gadget_ep_cmd(dep, cmd, params);

	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR0(ep), params->param0);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR1(ep), params->param1);
	dwc3_writel(dwc->regs, DWC3_DEPCMDPAR2(ep), params->param2);

	dwc3_writel(dwc->regs, DWC3_DEPCMD(ep), cmd | DWC3_DEPCMD_CMDACT);
	do {
		reg = dwc3_readl(dwc->regs, DWC3_DEPCMD(ep));
		if (!(reg & DWC3_DEPCMD_CMDACT)) {
			dev_vdbg(dwc->dev, "Command Complete --> %d\n",
					DWC3_DEPCMD_STATUS(reg));
			return 0;
		}

		/*
		 * We can't sleep here, because it is also called from
		 * interrupt context.
		 */
		timeout--;
		if (!timeout)
			return -ETIMEDOUT;

		udelay(1);
	} while (1);
}

static dma_addr_t dwc3_trb_dma_offset(struct dwc3_ep *dep,
		struct dwc3_trb *trb)
{
	u32 offset = (char *) trb - (char *) dep->trb_pool;

	return dep->trb_pool_dma + offset;
}

static int dwc3_alloc_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;

	if (dep->trb_pool)
		return 0;

	if (dep->number == 0 || dep->number == 1)
		return 0;

	dep->trb_pool = dma_alloc_coherent(dwc->dev,
			sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			&dep->trb_pool_dma, GFP_KERNEL);
	if (!dep->trb_pool) {
		dev_err(dep->dwc->dev, "failed to allocate trb pool for %s\n",
				dep->name);
		return -ENOMEM;
	}

	return 0;
}

static void dwc3_free_trb_pool(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;

	dma_free_coherent(dwc->dev, sizeof(struct dwc3_trb) * DWC3_TRB_NUM,
			dep->trb_pool, dep->trb_pool_dma);

	dep->trb_pool = NULL;
	dep->trb_pool_dma = 0;
}

static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;
	u32 cmd;

	memset(&params, 0x00, sizeof(params));

	if (dep->number != 1) {
		cmd = DWC3_DEPCMD_DEPSTARTCFG;
		/* XferRscIdx == 0 for ep0 and 2 for the remaining */
		if (dep->number > 1) {
			if (dwc->start_config_issued)
				return 0;
			dwc->start_config_issued = true;
			cmd |= DWC3_DEPCMD_PARAM(2);
		}

		return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
	}

	return 0;
}

static int dwc3_gadget_set_ep_config(struct dwc3 *dwc, struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc,
		bool ignore, bool restore)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPCFG_EP_TYPE(usb_endpoint_type(desc))
		| DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc));

	/* Burst size is only needed in SuperSpeed mode */
	if (dwc->gadget.speed == USB_SPEED_SUPER) {
		u32 burst = dep->endpoint.maxburst - 1;

		params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst);
	}

	if (ignore)
		params.param0 |= DWC3_DEPCFG_IGN_SEQ_NUM;

	if (restore) {
		params.param0 |= DWC3_DEPCFG_ACTION_RESTORE;
		params.param2 |= dep->saved_state;
	}

	params.param1 = DWC3_DEPCFG_XFER_COMPLETE_EN
		| DWC3_DEPCFG_XFER_NOT_READY_EN;

	if (usb_ss_max_streams(comp_desc) && usb_endpoint_xfer_bulk(desc)) {
		params.param1 |= DWC3_DEPCFG_STREAM_CAPABLE
			| DWC3_DEPCFG_STREAM_EVENT_EN;
		dep->stream_capable = true;
	}

	if (!usb_endpoint_xfer_control(desc))
		params.param1 |= DWC3_DEPCFG_XFER_IN_PROGRESS_EN;

	/*
	 * We are doing 1:1 mapping for endpoints, meaning
	 * Physical Endpoint 2 maps to Logical Endpoint 2 and
	 * so on. We consider the direction bit as part of the physical
	 * endpoint number. So USB endpoint 0x81 is 0x03.
	 */
	params.param1 |= DWC3_DEPCFG_EP_NUMBER(dep->number);

	/*
	 * We must use the lower 16 TX FIFOs even though
	 * HW might have more
	 */
	if (dep->direction)
		params.param0 |= DWC3_DEPCFG_FIFO_NUMBER(dep->number >> 1);

	if (desc->bInterval) {
		params.param1 |= DWC3_DEPCFG_BINTERVAL_M1(desc->bInterval - 1);
		dep->interval = 1 << (desc->bInterval - 1);
	}

	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETEPCONFIG, &params);
}

static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_gadget_ep_cmd_params params;

	memset(&params, 0x00, sizeof(params));

	params.param0 = DWC3_DEPXFERCFG_NUM_XFER_RES(1);

	return dwc3_send_gadget_ep_cmd(dwc, dep->number,
			DWC3_DEPCMD_SETTRANSFRESOURCE, &params);
}

/**
 * __dwc3_gadget_ep_enable - Initializes a HW endpoint
 * @dep: endpoint to be initialized
 * @desc: USB Endpoint Descriptor
 *
 * Caller should take care of locking
 */
static int __dwc3_gadget_ep_enable(struct dwc3_ep *dep,
		const struct usb_endpoint_descriptor *desc,
		const struct usb_ss_ep_comp_descriptor *comp_desc,
		bool ignore, bool restore)
{
	struct dwc3 *dwc = dep->dwc;
	u32 reg;
	int ret;

	dev_vdbg(dwc->dev, "Enabling %s\n", dep->name);

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		ret = dwc3_gadget_start_config(dwc, dep);
		if (ret)
			return ret;
	}

	ret = dwc3_gadget_set_ep_config(dwc, dep, desc, comp_desc, ignore,
			restore);
	if (ret)
		return ret;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		struct dwc3_trb *trb_st_hw;
		struct dwc3_trb *trb_link;

		ret = dwc3_gadget_set_xfer_resource(dwc, dep);
		if (ret)
			return ret;

		dep->endpoint.desc = desc;
		dep->comp_desc = comp_desc;
		dep->type = usb_endpoint_type(desc);
		dep->flags |= DWC3_EP_ENABLED;

		reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
		reg |= DWC3_DALEPENA_EP(dep->number);
		dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

		if (!usb_endpoint_xfer_isoc(desc))
			return 0;

		/* Link TRB for ISOC. The HWO bit is never reset */
		trb_st_hw = &dep->trb_pool[0];

		trb_link = &dep->trb_pool[DWC3_TRB_NUM - 1];
		memset(trb_link, 0, sizeof(*trb_link));

		trb_link->bpl = lower_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->bph = upper_32_bits(dwc3_trb_dma_offset(dep, trb_st_hw));
		trb_link->ctrl |= DWC3_TRBCTL_LINK_TRB;
		trb_link->ctrl |= DWC3_TRB_CTRL_HWO;
	}

	return 0;
}

static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force);
static void dwc3_remove_requests(struct dwc3 *dwc, struct dwc3_ep *dep)
{
	struct dwc3_request *req;

	if (!list_empty(&dep->req_queued)) {
		dwc3_stop_active_transfer(dwc, dep->number, true);

		/* - giveback all requests to gadget driver */
		while (!list_empty(&dep->req_queued)) {
			req = next_request(&dep->req_queued);

			dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
		}
	}

	while (!list_empty(&dep->request_list)) {
		req = next_request(&dep->request_list);

		dwc3_gadget_giveback(dep, req, -ESHUTDOWN);
	}
}

/**
 * __dwc3_gadget_ep_disable - Disables a HW endpoint
 * @dep: the endpoint to disable
 *
 * This function also removes requests which are currently processed by the
 * hardware and those which are not yet scheduled.
 * Caller should take care of locking.
 */
static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
{
	struct dwc3 *dwc = dep->dwc;
	u32 reg;

	dwc3_remove_requests(dwc, dep);

	/* make sure HW endpoint isn't stalled */
	if (dep->flags & DWC3_EP_STALL)
		__dwc3_gadget_ep_set_halt(dep, 0, false);

	reg = dwc3_readl(dwc->regs, DWC3_DALEPENA);
	reg &= ~DWC3_DALEPENA_EP(dep->number);
	dwc3_writel(dwc->regs, DWC3_DALEPENA, reg);

	dep->stream_capable = false;
	dep->endpoint.desc = NULL;
	dep->comp_desc = NULL;
	dep->type = 0;
	dep->flags = 0;

	return 0;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep0_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	return -EINVAL;
}

static int dwc3_gadget_ep0_disable(struct usb_ep *ep)
{
	return -EINVAL;
}

/* -------------------------------------------------------------------------- */

static int dwc3_gadget_ep_enable(struct usb_ep *ep,
		const struct usb_endpoint_descriptor *desc)
{
	struct dwc3_ep *dep;
	struct dwc3 *dwc;
	unsigned long flags;
	int ret;

	if (!ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	if (!desc->wMaxPacketSize) {
		pr_debug("dwc3: missing wMaxPacketSize\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (dep->flags & DWC3_EP_ENABLED) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already enabled\n",
				dep->name);
		return 0;
	}

	switch (usb_endpoint_type(desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		strlcat(dep->name, "-control", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_ISOC:
		strlcat(dep->name, "-isoc", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_BULK:
		strlcat(dep->name, "-bulk", sizeof(dep->name));
		break;
	case USB_ENDPOINT_XFER_INT:
		strlcat(dep->name, "-int", sizeof(dep->name));
		break;
	default:
		dev_err(dwc->dev, "invalid endpoint transfer type\n");
	}

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_enable(dep, desc, ep->comp_desc, false, false);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static int dwc3_gadget_ep_disable(struct usb_ep *ep)
{
	struct dwc3_ep *dep;
	struct dwc3 *dwc;
	unsigned long flags;
	int ret;

	if (!ep) {
		pr_debug("dwc3: invalid parameters\n");
		return -EINVAL;
	}

	dep = to_dwc3_ep(ep);
	dwc = dep->dwc;

	if (!(dep->flags & DWC3_EP_ENABLED)) {
		dev_WARN_ONCE(dwc->dev, true, "%s is already disabled\n",
				dep->name);
		return 0;
	}

	snprintf(dep->name, sizeof(dep->name), "ep%d%s",
			dep->number >> 1,
			(dep->number & 1) ? "in" : "out");

	spin_lock_irqsave(&dwc->lock, flags);
	ret = __dwc3_gadget_ep_disable(dep);
	spin_unlock_irqrestore(&dwc->lock, flags);

	return ret;
}

static struct usb_request *dwc3_gadget_ep_alloc_request(struct usb_ep *ep,
		gfp_t gfp_flags)
{
	struct dwc3_request *req;
	struct dwc3_ep *dep = to_dwc3_ep(ep);

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	req->epnum = dep->number;
	req->dep = dep;

	trace_dwc3_alloc_request(req);

	return &req->request;
}

static void dwc3_gadget_ep_free_request(struct usb_ep *ep,
		struct usb_request *request)
{
	struct dwc3_request *req = to_dwc3_request(request);

	trace_dwc3_free_request(req);
	kfree(req);
}

/**
 * dwc3_prepare_one_trb - setup one TRB from one request
 * @dep: endpoint for which this request is prepared
 * @req: dwc3_request pointer
 */
static void dwc3_prepare_one_trb(struct dwc3_ep *dep,
		struct dwc3_request *req, dma_addr_t dma,
		unsigned length, unsigned last, unsigned chain, unsigned node)
{
	struct dwc3 *dwc = dep->dwc;
	struct dwc3_trb *trb;

	dev_vdbg(dwc->dev, "%s: req %p dma %08llx length %d%s%s\n",
			dep->name, req, (unsigned long long) dma,
			length, last ? " last" : "",
			chain ? " chain" : "");

	trb = &dep->trb_pool[dep->free_slot & DWC3_TRB_MASK];

	if (!req->trb) {
		dwc3_gadget_move_request_queued(req);
		req->trb = trb;
		req->trb_dma = dwc3_trb_dma_offset(dep, trb);
		req->start_slot = dep->free_slot & DWC3_TRB_MASK;
	}

	dep->free_slot++;
	/* Skip the LINK-TRB on ISOC */
	if (((dep->free_slot & DWC3_TRB_MASK) == DWC3_TRB_NUM - 1) &&
			usb_endpoint_xfer_isoc(dep->endpoint.desc))
		dep->free_slot++;

	trb->size = DWC3_TRB_SIZE_LENGTH(length);
	trb->bpl = lower_32_bits(dma);
	trb->bph = upper_32_bits(dma);

	switch (usb_endpoint_type(dep->endpoint.desc)) {
	case USB_ENDPOINT_XFER_CONTROL:
		trb->ctrl = DWC3_TRBCTL_CONTROL_SETUP;
		break;

	case USB_ENDPOINT_XFER_ISOC:
		if (!node)
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS_FIRST;
		else
			trb->ctrl = DWC3_TRBCTL_ISOCHRONOUS;
		break;

	case USB_ENDPOINT_XFER_BULK:
	case USB_ENDPOINT_XFER_INT:
		trb->ctrl = DWC3_TRBCTL_NORMAL;
		break;
	default:
		/*
		 * This is only possible with faulty memory because we
		 * checked it already :)
		 */
		BUG();
	}

	if (!req->request.no_interrupt && !chain)
		trb->ctrl |= DWC3_TRB_CTRL_IOC;

	if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI;
		trb->ctrl |= DWC3_TRB_CTRL_CSP;
	} else if (last) {
		trb->ctrl |= DWC3_TRB_CTRL_LST;
	}

	if (chain)
		trb->ctrl |= DWC3_TRB_CTRL_CHN;

	if (usb_endpoint_xfer_bulk(dep->endpoint.desc) && dep->stream_capable)
		trb->ctrl |= DWC3_TRB_CTRL_SID_SOFN(req->request.stream_id);

	trb->ctrl |= DWC3_TRB_CTRL_HWO;

	trace_dwc3_prepare_trb(dep, trb);
}

/*
 * dwc3_prepare_trbs - setup TRBs from requests
 * @dep: endpoint for which requests are being prepared
 * @starting: true if the endpoint is idle and no requests are queued.
 *
 * The function goes through the requests list and sets up TRBs for the
 * transfers. The function returns once there are no more TRBs available or
 * it runs out of requests.
 */
static void dwc3_prepare_trbs(struct dwc3_ep *dep, bool starting)
{
	struct dwc3_request *req, *n;
	u32 trbs_left;
	u32 max;
	unsigned int last_one = 0;

	BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM);

	/* the first request must not be queued */
	trbs_left = (dep->busy_slot - dep->free_slot) & DWC3_TRB_MASK;

	/* Can't wrap around on a non-isoc EP since there's no link TRB */
	if (!usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
		max = DWC3_TRB_NUM - (dep->free_slot & DWC3_TRB_MASK);
		if (trbs_left > max)
			trbs_left = max;
	}

	/*
	 * If busy & slot are equal then it is either full or empty. If we are
	 * starting to process requests then we are empty. Otherwise we are
	 * full and don't do anything
	 */
	if (!trbs_left) {
		if (!starting)
			return;
		trbs_left = DWC3_TRB_NUM;
		/*
		 * In case we start from scratch, we queue the ISOC requests
		 * starting from slot 1. This is done because we use ring
		 * buffer and have no LST bit to stop us. Instead, we place
		 * IOC bit every TRB_NUM/4. We try to avoid having an interrupt
		 * after the first request so we start at slot 1 and have
		 * 7 requests proceed before we hit the first IOC.
		 * Other transfer types don't use the ring buffer and are
		 * processed from the first TRB until the last one. Since we
		 * don't wrap around we have to start at the beginning.
		 */
		if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
			dep->busy_slot = 1;
			dep->free_slot = 1;
		} else {
			dep->busy_slot = 0;
			dep->free_slot = 0;
		}
	}

	/* The last TRB is a link TRB, not used for xfer */
	if ((trbs_left <= 1) && usb_endpoint_xfer_isoc(dep->endpoint.desc))
		return;

	list_for_each_entry_safe(req, n, &dep->request_list, list) {
		unsigned length;
		dma_addr_t dma;
		last_one = false;

		if (req->request.num_mapped_sgs > 0) {
			struct usb_request *request = &req->request;
			struct scatterlist *sg = request->sg;
			struct scatterlist *s;
			int i;

			for_each_sg(sg, s, request->num_mapped_sgs, i) {
				unsigned chain = true;

				length = sg_dma_len(s);
				dma = sg_dma_address(s);

				if (i == (request->num_mapped_sgs - 1) ||
						sg_is_last(s)) {
					if (list_empty(&dep->request_list))
						last_one = true;
					chain = false;
				}

				trbs_left--;
				if (!trbs_left)
					last_one = true;

				if (last_one)
					chain = false;

				dwc3_prepare_one_trb(dep, req, dma, length,
						last_one, chain, i);

				if (last_one)
					break;
			}

			if (last_one)
				break;
		} else {
			dma = req->request.dma;
			length = req->request.length;
			trbs_left--;

			if (!trbs_left)
				last_one = 1;

			/* Is this the last request? */
			if (list_is_last(&req->list, &dep->request_list))
				last_one = 1;

			dwc3_prepare_one_trb(dep, req, dma, length,
					last_one, false, 0);

			if (last_one)
				break;
		}
	}
}
926
927static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep, u16 cmd_param,
928 int start_new)
929{
930 struct dwc3_gadget_ep_cmd_params params;
931 struct dwc3_request *req;
932 struct dwc3 *dwc = dep->dwc;
933 int ret;
934 u32 cmd;
935
936 if (start_new && (dep->flags & DWC3_EP_BUSY)) {
937 dev_vdbg(dwc->dev, "%s: endpoint busy\n", dep->name);
938 return -EBUSY;
939 }
940 dep->flags &= ~DWC3_EP_PENDING_REQUEST;
941
942 /*
943 * If we are getting here after a short-out-packet we don't enqueue any
944 * new requests as we try to set the IOC bit only on the last request.
945 */
946 if (start_new) {
947 if (list_empty(&dep->req_queued))
948 dwc3_prepare_trbs(dep, start_new);
949
950 /* req points to the first request which will be sent */
951 req = next_request(&dep->req_queued);
952 } else {
68e823e2
FB
953 dwc3_prepare_trbs(dep, start_new);
954
72246da4 955 /*
1d046793 956 * req points to the first request where HWO changed from 0 to 1
72246da4 957 */
68e823e2 958 req = next_request(&dep->req_queued);
72246da4
FB
959 }
960 if (!req) {
961 dep->flags |= DWC3_EP_PENDING_REQUEST;
962 return 0;
963 }
964
965 memset(&params, 0, sizeof(params));
72246da4 966
1877d6c9
PA
967 if (start_new) {
968 params.param0 = upper_32_bits(req->trb_dma);
969 params.param1 = lower_32_bits(req->trb_dma);
72246da4 970 cmd = DWC3_DEPCMD_STARTTRANSFER;
1877d6c9 971 } else {
72246da4 972 cmd = DWC3_DEPCMD_UPDATETRANSFER;
1877d6c9 973 }
72246da4
FB
974
975 cmd |= DWC3_DEPCMD_PARAM(cmd_param);
976 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
977 if (ret < 0) {
978 dev_dbg(dwc->dev, "failed to send STARTTRANSFER command\n");
979
980 /*
981 * FIXME we need to iterate over the list of requests
982 * here and stop, unmap, free and del each of the linked
1d046793 983 * requests instead of what we do now.
72246da4 984 */
0fc9a1be
FB
985 usb_gadget_unmap_request(&dwc->gadget, &req->request,
986 req->direction);
72246da4
FB
987 list_del(&req->list);
988 return ret;
989 }
990
991 dep->flags |= DWC3_EP_BUSY;
25b8ff68 992
f898ae09 993 if (start_new) {
b4996a86 994 dep->resource_index = dwc3_gadget_ep_get_transfer_index(dwc,
f898ae09 995 dep->number);
b4996a86 996 WARN_ON_ONCE(!dep->resource_index);
f898ae09 997 }
25b8ff68 998
72246da4
FB
999 return 0;
1000}
1001
d6d6ec7b
PA
1002static void __dwc3_gadget_start_isoc(struct dwc3 *dwc,
1003 struct dwc3_ep *dep, u32 cur_uf)
1004{
1005 u32 uf;
1006
1007 if (list_empty(&dep->request_list)) {
1008 dev_vdbg(dwc->dev, "ISOC ep %s run out for requests.\n",
1009 dep->name);
f4a53c55 1010 dep->flags |= DWC3_EP_PENDING_REQUEST;
d6d6ec7b
PA
1011 return;
1012 }
1013
1014 /* 4 micro frames in the future */
1015 uf = cur_uf + dep->interval * 4;
1016
1017 __dwc3_gadget_kick_transfer(dep, uf, 1);
1018}
1019
1020static void dwc3_gadget_start_isoc(struct dwc3 *dwc,
1021 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
1022{
1023 u32 cur_uf, mask;
1024
1025 mask = ~(dep->interval - 1);
1026 cur_uf = event->parameters & mask;
1027
1028 __dwc3_gadget_start_isoc(dwc, dep, cur_uf);
1029}
1030
72246da4
FB
1031static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req)
1032{
0fc9a1be
FB
1033 struct dwc3 *dwc = dep->dwc;
1034 int ret;
1035
72246da4
FB
1036 req->request.actual = 0;
1037 req->request.status = -EINPROGRESS;
1038 req->direction = dep->direction;
1039 req->epnum = dep->number;
1040
1041 /*
1042 * We only add to our list of requests now and
1043 * start consuming the list once we get XferNotReady
1044 * IRQ.
1045 *
1046 * That way, we avoid doing anything that we don't need
1047 * to do now and defer it until the point we receive a
1048 * particular token from the Host side.
1049 *
1050 * This will also avoid Host cancelling URBs due to too
1d046793 1051 * many NAKs.
72246da4 1052 */
0fc9a1be
FB
1053 ret = usb_gadget_map_request(&dwc->gadget, &req->request,
1054 dep->direction);
1055 if (ret)
1056 return ret;
1057
72246da4
FB
1058 list_add_tail(&req->list, &dep->request_list);
1059
1060 /*
b511e5e7 1061 * There are a few special cases:
72246da4 1062 *
f898ae09
PZ
1063 * 1. XferNotReady with empty list of requests. We need to kick the
1064 * transfer here in that situation, otherwise we will be NAKing
1065 * forever. If we get XferNotReady before gadget driver has a
1066 * chance to queue a request, we will ACK the IRQ but won't be
1067 * able to receive the data until the next request is queued.
1068 * The following code is handling exactly that.
72246da4 1069 *
72246da4
FB
1070 */
1071 if (dep->flags & DWC3_EP_PENDING_REQUEST) {
f4a53c55
PA
1072 /*
1073 * If xfernotready is already elapsed and it is a case
1074 * of isoc transfer, then issue END TRANSFER, so that
1075 * you can receive xfernotready again and can have
1076 * notion of current microframe.
1077 */
1078 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
cdc359dd 1079 if (list_empty(&dep->req_queued)) {
b992e681 1080 dwc3_stop_active_transfer(dwc, dep->number, true);
cdc359dd
PA
1081 dep->flags = DWC3_EP_ENABLED;
1082 }
f4a53c55
PA
1083 return 0;
1084 }
1085
b511e5e7 1086 ret = __dwc3_gadget_kick_transfer(dep, 0, true);
348e026f 1087 if (ret && ret != -EBUSY)
b511e5e7
FB
1088 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1089 dep->name);
15f86bde 1090 return ret;
b511e5e7 1091 }
72246da4 1092
b511e5e7
FB
1093 /*
1094 * 2. XferInProgress on Isoc EP with an active transfer. We need to
1095 * kick the transfer here after queuing a request, otherwise the
1096 * core may not see the modified TRB(s).
1097 */
1098 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
79c9046e
PA
1099 (dep->flags & DWC3_EP_BUSY) &&
1100 !(dep->flags & DWC3_EP_MISSED_ISOC)) {
b4996a86
FB
1101 WARN_ON_ONCE(!dep->resource_index);
1102 ret = __dwc3_gadget_kick_transfer(dep, dep->resource_index,
b511e5e7 1103 false);
348e026f 1104 if (ret && ret != -EBUSY)
72246da4
FB
1105 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1106 dep->name);
15f86bde 1107 return ret;
a0925324 1108 }
72246da4 1109
b997ada5
FB
1110 /*
1111 * 4. Stream Capable Bulk Endpoints. We need to start the transfer
1112 * right away, otherwise host will not know we have streams to be
1113 * handled.
1114 */
1115 if (dep->stream_capable) {
1116 int ret;
1117
1118 ret = __dwc3_gadget_kick_transfer(dep, 0, true);
1119 if (ret && ret != -EBUSY) {
1120 struct dwc3 *dwc = dep->dwc;
1121
1122 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1123 dep->name);
1124 }
1125 }
1126
72246da4
FB
1127 return 0;
1128}
1129
1130static int dwc3_gadget_ep_queue(struct usb_ep *ep, struct usb_request *request,
1131 gfp_t gfp_flags)
1132{
1133 struct dwc3_request *req = to_dwc3_request(request);
1134 struct dwc3_ep *dep = to_dwc3_ep(ep);
1135 struct dwc3 *dwc = dep->dwc;
1136
1137 unsigned long flags;
1138
1139 int ret;
1140
fdee4eba 1141 spin_lock_irqsave(&dwc->lock, flags);
16e78db7 1142 if (!dep->endpoint.desc) {
72246da4
FB
1143 dev_dbg(dwc->dev, "trying to queue request %p to disabled %s\n",
1144 request, ep->name);
73359cef
FB
1145 ret = -ESHUTDOWN;
1146 goto out;
1147 }
1148
1149 if (WARN(req->dep != dep, "request %p belongs to '%s'\n",
1150 request, req->dep->name)) {
1151 ret = -EINVAL;
1152 goto out;
72246da4
FB
1153 }
1154
1155 dev_vdbg(dwc->dev, "queing request %p to %s length %d\n",
1156 request, ep->name, request->length);
2c4cbe6e 1157 trace_dwc3_ep_queue(req);
72246da4 1158
72246da4 1159 ret = __dwc3_gadget_ep_queue(dep, req);
73359cef
FB
1160
1161out:
72246da4
FB
1162 spin_unlock_irqrestore(&dwc->lock, flags);
1163
1164 return ret;
1165}
1166
1167static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
1168 struct usb_request *request)
1169{
1170 struct dwc3_request *req = to_dwc3_request(request);
1171 struct dwc3_request *r = NULL;
1172
1173 struct dwc3_ep *dep = to_dwc3_ep(ep);
1174 struct dwc3 *dwc = dep->dwc;
1175
1176 unsigned long flags;
1177 int ret = 0;
1178
2c4cbe6e
FB
1179 trace_dwc3_ep_dequeue(req);
1180
72246da4
FB
1181 spin_lock_irqsave(&dwc->lock, flags);
1182
1183 list_for_each_entry(r, &dep->request_list, list) {
1184 if (r == req)
1185 break;
1186 }
1187
1188 if (r != req) {
1189 list_for_each_entry(r, &dep->req_queued, list) {
1190 if (r == req)
1191 break;
1192 }
1193 if (r == req) {
1194 /* wait until it is processed */
b992e681 1195 dwc3_stop_active_transfer(dwc, dep->number, true);
e8d4e8be 1196 goto out1;
72246da4
FB
1197 }
1198 dev_err(dwc->dev, "request %p was not queued to %s\n",
1199 request, ep->name);
1200 ret = -EINVAL;
1201 goto out0;
1202 }
1203
e8d4e8be 1204out1:
72246da4
FB
1205 /* giveback the request */
1206 dwc3_gadget_giveback(dep, req, -ECONNRESET);
1207
1208out0:
1209 spin_unlock_irqrestore(&dwc->lock, flags);
1210
1211 return ret;
1212}
1213
7a608559 1214int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol)
72246da4
FB
1215{
1216 struct dwc3_gadget_ep_cmd_params params;
1217 struct dwc3 *dwc = dep->dwc;
1218 int ret;
1219
5ad02fb8
FB
1220 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
1221 dev_err(dwc->dev, "%s is of Isochronous type\n", dep->name);
1222 return -EINVAL;
1223 }
1224
72246da4
FB
1225 memset(&params, 0x00, sizeof(params));
1226
1227 if (value) {
7a608559
FB
1228 if (!protocol && ((dep->direction && dep->flags & DWC3_EP_BUSY) ||
1229 (!list_empty(&dep->req_queued) ||
1230 !list_empty(&dep->request_list)))) {
1231 dev_dbg(dwc->dev, "%s: pending request, cannot halt\n",
1232 dep->name);
1233 return -EAGAIN;
1234 }
1235
72246da4
FB
1236 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1237 DWC3_DEPCMD_SETSTALL, &params);
1238 if (ret)
3f89204b 1239 dev_err(dwc->dev, "failed to set STALL on %s\n",
72246da4
FB
1240 dep->name);
1241 else
1242 dep->flags |= DWC3_EP_STALL;
1243 } else {
1244 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
1245 DWC3_DEPCMD_CLEARSTALL, &params);
1246 if (ret)
3f89204b 1247 dev_err(dwc->dev, "failed to clear STALL on %s\n",
72246da4
FB
1248 dep->name);
1249 else
a535d81c 1250 dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE);
72246da4 1251 }
5275455a 1252
72246da4
FB
1253 return ret;
1254}
1255
1256static int dwc3_gadget_ep_set_halt(struct usb_ep *ep, int value)
1257{
1258 struct dwc3_ep *dep = to_dwc3_ep(ep);
1259 struct dwc3 *dwc = dep->dwc;
1260
1261 unsigned long flags;
1262
1263 int ret;
1264
1265 spin_lock_irqsave(&dwc->lock, flags);
7a608559 1266 ret = __dwc3_gadget_ep_set_halt(dep, value, false);
72246da4
FB
1267 spin_unlock_irqrestore(&dwc->lock, flags);
1268
1269 return ret;
1270}
1271
1272static int dwc3_gadget_ep_set_wedge(struct usb_ep *ep)
1273{
1274 struct dwc3_ep *dep = to_dwc3_ep(ep);
249a4569
PZ
1275 struct dwc3 *dwc = dep->dwc;
1276 unsigned long flags;
95aa4e8d 1277 int ret;
72246da4 1278
249a4569 1279 spin_lock_irqsave(&dwc->lock, flags);
72246da4
FB
1280 dep->flags |= DWC3_EP_WEDGE;
1281
08f0d966 1282 if (dep->number == 0 || dep->number == 1)
95aa4e8d 1283 ret = __dwc3_gadget_ep0_set_halt(ep, 1);
08f0d966 1284 else
7a608559 1285 ret = __dwc3_gadget_ep_set_halt(dep, 1, false);
95aa4e8d
FB
1286 spin_unlock_irqrestore(&dwc->lock, flags);
1287
1288 return ret;
72246da4
FB
1289}
1290
1291/* -------------------------------------------------------------------------- */
1292
1293static struct usb_endpoint_descriptor dwc3_gadget_ep0_desc = {
1294 .bLength = USB_DT_ENDPOINT_SIZE,
1295 .bDescriptorType = USB_DT_ENDPOINT,
1296 .bmAttributes = USB_ENDPOINT_XFER_CONTROL,
1297};
1298
1299static const struct usb_ep_ops dwc3_gadget_ep0_ops = {
1300 .enable = dwc3_gadget_ep0_enable,
1301 .disable = dwc3_gadget_ep0_disable,
1302 .alloc_request = dwc3_gadget_ep_alloc_request,
1303 .free_request = dwc3_gadget_ep_free_request,
1304 .queue = dwc3_gadget_ep0_queue,
1305 .dequeue = dwc3_gadget_ep_dequeue,
08f0d966 1306 .set_halt = dwc3_gadget_ep0_set_halt,
72246da4
FB
1307 .set_wedge = dwc3_gadget_ep_set_wedge,
1308};
1309
1310static const struct usb_ep_ops dwc3_gadget_ep_ops = {
1311 .enable = dwc3_gadget_ep_enable,
1312 .disable = dwc3_gadget_ep_disable,
1313 .alloc_request = dwc3_gadget_ep_alloc_request,
1314 .free_request = dwc3_gadget_ep_free_request,
1315 .queue = dwc3_gadget_ep_queue,
1316 .dequeue = dwc3_gadget_ep_dequeue,
1317 .set_halt = dwc3_gadget_ep_set_halt,
1318 .set_wedge = dwc3_gadget_ep_set_wedge,
1319};
1320
1321/* -------------------------------------------------------------------------- */
1322
1323static int dwc3_gadget_get_frame(struct usb_gadget *g)
1324{
1325 struct dwc3 *dwc = gadget_to_dwc(g);
1326 u32 reg;
1327
1328 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1329 return DWC3_DSTS_SOFFN(reg);
1330}
1331
1332static int dwc3_gadget_wakeup(struct usb_gadget *g)
1333{
1334 struct dwc3 *dwc = gadget_to_dwc(g);
1335
1336 unsigned long timeout;
1337 unsigned long flags;
1338
1339 u32 reg;
1340
1341 int ret = 0;
1342
1343 u8 link_state;
1344 u8 speed;
1345
1346 spin_lock_irqsave(&dwc->lock, flags);
1347
1348 /*
1349 * According to the Databook Remote wakeup request should
1350 * be issued only when the device is in early suspend state.
1351 *
1352 * We can check that via USB Link State bits in DSTS register.
1353 */
1354 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1355
1356 speed = reg & DWC3_DSTS_CONNECTSPD;
1357 if (speed == DWC3_DSTS_SUPERSPEED) {
1358 dev_dbg(dwc->dev, "no wakeup on SuperSpeed\n");
1359 ret = -EINVAL;
1360 goto out;
1361 }
1362
1363 link_state = DWC3_DSTS_USBLNKST(reg);
1364
1365 switch (link_state) {
1366 case DWC3_LINK_STATE_RX_DET: /* in HS, means Early Suspend */
1367 case DWC3_LINK_STATE_U3: /* in HS, means SUSPEND */
1368 break;
1369 default:
1370 dev_dbg(dwc->dev, "can't wakeup from link state %d\n",
1371 link_state);
1372 ret = -EINVAL;
1373 goto out;
1374 }
1375
8598bde7
FB
1376 ret = dwc3_gadget_set_link_state(dwc, DWC3_LINK_STATE_RECOV);
1377 if (ret < 0) {
1378 dev_err(dwc->dev, "failed to put link in Recovery\n");
1379 goto out;
1380 }
72246da4 1381
802fde98
PZ
1382 /* Recent versions do this automatically */
1383 if (dwc->revision < DWC3_REVISION_194A) {
1384 /* write zeroes to Link Change Request */
fcc023c7 1385 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
802fde98
PZ
1386 reg &= ~DWC3_DCTL_ULSTCHNGREQ_MASK;
1387 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1388 }
72246da4 1389
1d046793 1390 /* poll until Link State changes to ON */
72246da4
FB
1391 timeout = jiffies + msecs_to_jiffies(100);
1392
1d046793 1393 while (!time_after(jiffies, timeout)) {
72246da4
FB
1394 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1395
1396 /* in HS, means ON */
1397 if (DWC3_DSTS_USBLNKST(reg) == DWC3_LINK_STATE_U0)
1398 break;
1399 }
1400
1401 if (DWC3_DSTS_USBLNKST(reg) != DWC3_LINK_STATE_U0) {
1402 dev_err(dwc->dev, "failed to send remote wakeup\n");
1403 ret = -EINVAL;
1404 }
1405
1406out:
1407 spin_unlock_irqrestore(&dwc->lock, flags);
1408
1409 return ret;
1410}
1411
1412static int dwc3_gadget_set_selfpowered(struct usb_gadget *g,
1413 int is_selfpowered)
1414{
1415 struct dwc3 *dwc = gadget_to_dwc(g);
249a4569 1416 unsigned long flags;
72246da4 1417
249a4569 1418 spin_lock_irqsave(&dwc->lock, flags);
72246da4 1419 dwc->is_selfpowered = !!is_selfpowered;
249a4569 1420 spin_unlock_irqrestore(&dwc->lock, flags);
72246da4
FB
1421
1422 return 0;
1423}
1424
7b2a0368 1425static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
72246da4
FB
1426{
1427 u32 reg;
61d58242 1428 u32 timeout = 500;
72246da4
FB
1429
1430 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
8db7ed15 1431 if (is_on) {
802fde98
PZ
1432 if (dwc->revision <= DWC3_REVISION_187A) {
1433 reg &= ~DWC3_DCTL_TRGTULST_MASK;
1434 reg |= DWC3_DCTL_TRGTULST_RX_DET;
1435 }
1436
1437 if (dwc->revision >= DWC3_REVISION_194A)
1438 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1439 reg |= DWC3_DCTL_RUN_STOP;
7b2a0368
FB
1440
1441 if (dwc->has_hibernation)
1442 reg |= DWC3_DCTL_KEEP_CONNECT;
1443
9fcb3bd8 1444 dwc->pullups_connected = true;
8db7ed15 1445 } else {
72246da4 1446 reg &= ~DWC3_DCTL_RUN_STOP;
7b2a0368
FB
1447
1448 if (dwc->has_hibernation && !suspend)
1449 reg &= ~DWC3_DCTL_KEEP_CONNECT;
1450
9fcb3bd8 1451 dwc->pullups_connected = false;
8db7ed15 1452 }
72246da4
FB
1453
1454 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1455
1456 do {
1457 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
1458 if (is_on) {
1459 if (!(reg & DWC3_DSTS_DEVCTRLHLT))
1460 break;
1461 } else {
1462 if (reg & DWC3_DSTS_DEVCTRLHLT)
1463 break;
1464 }
72246da4
FB
1465 timeout--;
1466 if (!timeout)
6f17f74b 1467 return -ETIMEDOUT;
61d58242 1468 udelay(1);
72246da4
FB
1469 } while (1);
1470
1471 dev_vdbg(dwc->dev, "gadget %s data soft-%s\n",
1472 dwc->gadget_driver
1473 ? dwc->gadget_driver->function : "no-function",
1474 is_on ? "connect" : "disconnect");
6f17f74b
PA
1475
1476 return 0;
72246da4
FB
1477}
1478
1479static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
1480{
1481 struct dwc3 *dwc = gadget_to_dwc(g);
1482 unsigned long flags;
6f17f74b 1483 int ret;
72246da4
FB
1484
1485 is_on = !!is_on;
1486
1487 spin_lock_irqsave(&dwc->lock, flags);
7b2a0368 1488 ret = dwc3_gadget_run_stop(dwc, is_on, false);
72246da4
FB
1489 spin_unlock_irqrestore(&dwc->lock, flags);
1490
6f17f74b 1491 return ret;
72246da4
FB
1492}
1493
8698e2ac
FB
1494static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
1495{
1496 u32 reg;
1497
1498 /* Enable all but Start and End of Frame IRQs */
1499 reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
1500 DWC3_DEVTEN_EVNTOVERFLOWEN |
1501 DWC3_DEVTEN_CMDCMPLTEN |
1502 DWC3_DEVTEN_ERRTICERREN |
1503 DWC3_DEVTEN_WKUPEVTEN |
1504 DWC3_DEVTEN_ULSTCNGEN |
1505 DWC3_DEVTEN_CONNECTDONEEN |
1506 DWC3_DEVTEN_USBRSTEN |
1507 DWC3_DEVTEN_DISCONNEVTEN);
1508
1509 dwc3_writel(dwc->regs, DWC3_DEVTEN, reg);
1510}
1511
1512static void dwc3_gadget_disable_irq(struct dwc3 *dwc)
1513{
1514 /* mask all interrupts */
1515 dwc3_writel(dwc->regs, DWC3_DEVTEN, 0x00);
1516}
1517
1518static irqreturn_t dwc3_interrupt(int irq, void *_dwc);
b15a762f 1519static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc);
8698e2ac 1520
72246da4
FB
1521static int dwc3_gadget_start(struct usb_gadget *g,
1522 struct usb_gadget_driver *driver)
1523{
1524 struct dwc3 *dwc = gadget_to_dwc(g);
1525 struct dwc3_ep *dep;
1526 unsigned long flags;
1527 int ret = 0;
8698e2ac 1528 int irq;
72246da4
FB
1529 u32 reg;
1530
b0d7ffd4
FB
1531 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1532 ret = request_threaded_irq(irq, dwc3_interrupt, dwc3_thread_interrupt,
e8adfc30 1533 IRQF_SHARED, "dwc3", dwc);
b0d7ffd4
FB
1534 if (ret) {
1535 dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
1536 irq, ret);
1537 goto err0;
1538 }
1539
72246da4
FB
1540 spin_lock_irqsave(&dwc->lock, flags);
1541
1542 if (dwc->gadget_driver) {
1543 dev_err(dwc->dev, "%s is already bound to %s\n",
1544 dwc->gadget.name,
1545 dwc->gadget_driver->driver.name);
1546 ret = -EBUSY;
b0d7ffd4 1547 goto err1;
72246da4
FB
1548 }
1549
1550 dwc->gadget_driver = driver;
72246da4 1551
72246da4
FB
1552 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
1553 reg &= ~(DWC3_DCFG_SPEED_MASK);
07e7f47b
FB
1554
1555 /**
1556 * WORKAROUND: DWC3 revision < 2.20a have an issue
1557 * which would cause metastability state on Run/Stop
1558 * bit if we try to force the IP to USB2-only mode.
1559 *
1560 * Because of that, we cannot configure the IP to any
1561 * speed other than the SuperSpeed
1562 *
1563 * Refers to:
1564 *
1565 * STAR#9000525659: Clock Domain Crossing on DCTL in
1566 * USB 2.0 Mode
1567 */
f7e846f0 1568 if (dwc->revision < DWC3_REVISION_220A) {
07e7f47b 1569 reg |= DWC3_DCFG_SUPERSPEED;
f7e846f0
FB
1570 } else {
1571 switch (dwc->maximum_speed) {
1572 case USB_SPEED_LOW:
1573 reg |= DWC3_DSTS_LOWSPEED;
1574 break;
1575 case USB_SPEED_FULL:
1576 reg |= DWC3_DSTS_FULLSPEED1;
1577 break;
1578 case USB_SPEED_HIGH:
1579 reg |= DWC3_DSTS_HIGHSPEED;
1580 break;
1581 case USB_SPEED_SUPER: /* FALLTHROUGH */
1582 case USB_SPEED_UNKNOWN: /* FALTHROUGH */
1583 default:
1584 reg |= DWC3_DSTS_SUPERSPEED;
1585 }
1586 }
72246da4
FB
1587 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
1588
b23c8439
PZ
1589 dwc->start_config_issued = false;
1590
72246da4
FB
1591 /* Start with SuperSpeed Default */
1592 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
1593
1594 dep = dwc->eps[0];
265b70a7
PZ
1595 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1596 false);
72246da4
FB
1597 if (ret) {
1598 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
b0d7ffd4 1599 goto err2;
72246da4
FB
1600 }
1601
1602 dep = dwc->eps[1];
265b70a7
PZ
1603 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
1604 false);
72246da4
FB
1605 if (ret) {
1606 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
b0d7ffd4 1607 goto err3;
72246da4
FB
1608 }
1609
1610 /* begin to receive SETUP packets */
c7fcdeb2 1611 dwc->ep0state = EP0_SETUP_PHASE;
72246da4
FB
1612 dwc3_ep0_out_start(dwc);
1613
8698e2ac
FB
1614 dwc3_gadget_enable_irq(dwc);
1615
72246da4
FB
1616 spin_unlock_irqrestore(&dwc->lock, flags);
1617
1618 return 0;
1619
b0d7ffd4 1620err3:
72246da4
FB
1621 __dwc3_gadget_ep_disable(dwc->eps[0]);
1622
b0d7ffd4 1623err2:
cdcedd69 1624 dwc->gadget_driver = NULL;
b0d7ffd4
FB
1625
1626err1:
72246da4
FB
1627 spin_unlock_irqrestore(&dwc->lock, flags);
1628
b0d7ffd4
FB
1629 free_irq(irq, dwc);
1630
1631err0:
72246da4
FB
1632 return ret;
1633}
1634
22835b80 1635static int dwc3_gadget_stop(struct usb_gadget *g)
72246da4
FB
1636{
1637 struct dwc3 *dwc = gadget_to_dwc(g);
1638 unsigned long flags;
8698e2ac 1639 int irq;
72246da4
FB
1640
1641 spin_lock_irqsave(&dwc->lock, flags);
1642
8698e2ac 1643 dwc3_gadget_disable_irq(dwc);
72246da4
FB
1644 __dwc3_gadget_ep_disable(dwc->eps[0]);
1645 __dwc3_gadget_ep_disable(dwc->eps[1]);
1646
1647 dwc->gadget_driver = NULL;
72246da4
FB
1648
1649 spin_unlock_irqrestore(&dwc->lock, flags);
1650
b0d7ffd4
FB
1651 irq = platform_get_irq(to_platform_device(dwc->dev), 0);
1652 free_irq(irq, dwc);
1653
72246da4
FB
1654 return 0;
1655}
802fde98 1656
72246da4
FB
1657static const struct usb_gadget_ops dwc3_gadget_ops = {
1658 .get_frame = dwc3_gadget_get_frame,
1659 .wakeup = dwc3_gadget_wakeup,
1660 .set_selfpowered = dwc3_gadget_set_selfpowered,
1661 .pullup = dwc3_gadget_pullup,
1662 .udc_start = dwc3_gadget_start,
1663 .udc_stop = dwc3_gadget_stop,
1664};
1665
1666/* -------------------------------------------------------------------------- */
1667
6a1e3ef4
FB
1668static int dwc3_gadget_init_hw_endpoints(struct dwc3 *dwc,
1669 u8 num, u32 direction)
72246da4
FB
1670{
1671 struct dwc3_ep *dep;
6a1e3ef4 1672 u8 i;
72246da4 1673
6a1e3ef4
FB
1674 for (i = 0; i < num; i++) {
1675 u8 epnum = (i << 1) | (!!direction);
72246da4 1676
72246da4 1677 dep = kzalloc(sizeof(*dep), GFP_KERNEL);
734d5a53 1678 if (!dep)
72246da4 1679 return -ENOMEM;
72246da4
FB
1680
1681 dep->dwc = dwc;
1682 dep->number = epnum;
9aa62ae4 1683 dep->direction = !!direction;
72246da4
FB
1684 dwc->eps[epnum] = dep;
1685
1686 snprintf(dep->name, sizeof(dep->name), "ep%d%s", epnum >> 1,
1687 (epnum & 1) ? "in" : "out");
6a1e3ef4 1688
72246da4 1689 dep->endpoint.name = dep->name;
72246da4 1690
653df35e
FB
1691 dev_vdbg(dwc->dev, "initializing %s\n", dep->name);
1692
72246da4 1693 if (epnum == 0 || epnum == 1) {
e117e742 1694 usb_ep_set_maxpacket_limit(&dep->endpoint, 512);
6048e4c6 1695 dep->endpoint.maxburst = 1;
72246da4
FB
1696 dep->endpoint.ops = &dwc3_gadget_ep0_ops;
1697 if (!epnum)
1698 dwc->gadget.ep0 = &dep->endpoint;
1699 } else {
1700 int ret;
1701
e117e742 1702 usb_ep_set_maxpacket_limit(&dep->endpoint, 1024);
12d36c16 1703 dep->endpoint.max_streams = 15;
72246da4
FB
1704 dep->endpoint.ops = &dwc3_gadget_ep_ops;
1705 list_add_tail(&dep->endpoint.ep_list,
1706 &dwc->gadget.ep_list);
1707
1708 ret = dwc3_alloc_trb_pool(dep);
25b8ff68 1709 if (ret)
72246da4 1710 return ret;
72246da4 1711 }
25b8ff68 1712
72246da4
FB
1713 INIT_LIST_HEAD(&dep->request_list);
1714 INIT_LIST_HEAD(&dep->req_queued);
1715 }
1716
1717 return 0;
1718}
1719
6a1e3ef4
FB
1720static int dwc3_gadget_init_endpoints(struct dwc3 *dwc)
1721{
1722 int ret;
1723
1724 INIT_LIST_HEAD(&dwc->gadget.ep_list);
1725
1726 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_out_eps, 0);
1727 if (ret < 0) {
1728 dev_vdbg(dwc->dev, "failed to allocate OUT endpoints\n");
1729 return ret;
1730 }
1731
1732 ret = dwc3_gadget_init_hw_endpoints(dwc, dwc->num_in_eps, 1);
1733 if (ret < 0) {
1734 dev_vdbg(dwc->dev, "failed to allocate IN endpoints\n");
1735 return ret;
1736 }
1737
1738 return 0;
1739}
1740
72246da4
FB
1741static void dwc3_gadget_free_endpoints(struct dwc3 *dwc)
1742{
1743 struct dwc3_ep *dep;
1744 u8 epnum;
1745
1746 for (epnum = 0; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
1747 dep = dwc->eps[epnum];
6a1e3ef4
FB
1748 if (!dep)
1749 continue;
5bf8fae3
GC
1750 /*
1751 * Physical endpoints 0 and 1 are special; they form the
1752 * bi-directional USB endpoint 0.
1753 *
1754 * For those two physical endpoints, we don't allocate a TRB
1755 * pool nor do we add them the endpoints list. Due to that, we
1756 * shouldn't do these two operations otherwise we would end up
1757 * with all sorts of bugs when removing dwc3.ko.
1758 */
1759 if (epnum != 0 && epnum != 1) {
1760 dwc3_free_trb_pool(dep);
72246da4 1761 list_del(&dep->endpoint.ep_list);
5bf8fae3 1762 }
72246da4
FB
1763
1764 kfree(dep);
1765 }
1766}
1767
72246da4 1768/* -------------------------------------------------------------------------- */
e5caff68 1769
e5ba5ec8
PA
1770static int __dwc3_cleanup_done_trbs(struct dwc3 *dwc, struct dwc3_ep *dep,
1771 struct dwc3_request *req, struct dwc3_trb *trb,
72246da4
FB
1772 const struct dwc3_event_depevt *event, int status)
1773{
72246da4
FB
1774 unsigned int count;
1775 unsigned int s_pkt = 0;
d6d6ec7b 1776 unsigned int trb_status;
72246da4 1777
2c4cbe6e
FB
1778 trace_dwc3_complete_trb(dep, trb);
1779
e5ba5ec8
PA
1780 if ((trb->ctrl & DWC3_TRB_CTRL_HWO) && status != -ESHUTDOWN)
1781 /*
1782 * We continue despite the error. There is not much we
1783 * can do. If we don't clean it up we loop forever. If
1784 * we skip the TRB then it gets overwritten after a
1785 * while since we use them in a ring buffer. A BUG()
1786 * would help. Lets hope that if this occurs, someone
1787 * fixes the root cause instead of looking away :)
1788 */
1789 dev_err(dwc->dev, "%s's TRB (%p) still owned by HW\n",
1790 dep->name, trb);
1791 count = trb->size & DWC3_TRB_SIZE_MASK;
1792
1793 if (dep->direction) {
1794 if (count) {
1795 trb_status = DWC3_TRB_SIZE_TRBSTS(trb->size);
1796 if (trb_status == DWC3_TRBSTS_MISSED_ISOC) {
1797 dev_dbg(dwc->dev, "incomplete IN transfer %s\n",
1798 dep->name);
1799 /*
1800 * If missed isoc occurred and there is
1801 * no request queued then issue END
1802 * TRANSFER, so that core generates
1803 * next xfernotready and we will issue
1804 * a fresh START TRANSFER.
1805 * If there are still queued request
1806 * then wait, do not issue either END
1807 * or UPDATE TRANSFER, just attach next
1808 * request in request_list during
1809 * giveback.If any future queued request
1810 * is successfully transferred then we
1811 * will issue UPDATE TRANSFER for all
1812 * request in the request_list.
1813 */
1814 dep->flags |= DWC3_EP_MISSED_ISOC;
1815 } else {
1816 dev_err(dwc->dev, "incomplete IN transfer %s\n",
1817 dep->name);
1818 status = -ECONNRESET;
1819 }
1820 } else {
1821 dep->flags &= ~DWC3_EP_MISSED_ISOC;
1822 }
1823 } else {
1824 if (count && (event->status & DEPEVT_STATUS_SHORT))
1825 s_pkt = 1;
1826 }
1827
1828 /*
1829 * We assume here we will always receive the entire data block
1830 * which we should receive. Meaning, if we program RX to
1831 * receive 4K but we receive only 2K, we assume that's all we
1832 * should receive and we simply bounce the request back to the
1833 * gadget driver for further processing.
1834 */
1835 req->request.actual += req->request.length - count;
1836 if (s_pkt)
1837 return 1;
1838 if ((event->status & DEPEVT_STATUS_LST) &&
1839 (trb->ctrl & (DWC3_TRB_CTRL_LST |
1840 DWC3_TRB_CTRL_HWO)))
1841 return 1;
1842 if ((event->status & DEPEVT_STATUS_IOC) &&
1843 (trb->ctrl & DWC3_TRB_CTRL_IOC))
1844 return 1;
1845 return 0;
1846}
1847
1848static int dwc3_cleanup_done_reqs(struct dwc3 *dwc, struct dwc3_ep *dep,
1849 const struct dwc3_event_depevt *event, int status)
1850{
1851 struct dwc3_request *req;
1852 struct dwc3_trb *trb;
1853 unsigned int slot;
1854 unsigned int i;
1855 int ret;
1856
72246da4
FB
1857 do {
1858 req = next_request(&dep->req_queued);
d39ee7be
SAS
1859 if (!req) {
1860 WARN_ON_ONCE(1);
1861 return 1;
1862 }
e5ba5ec8
PA
1863 i = 0;
1864 do {
1865 slot = req->start_slot + i;
1866 if ((slot == DWC3_TRB_NUM - 1) &&
1867 usb_endpoint_xfer_isoc(dep->endpoint.desc))
1868 slot++;
1869 slot %= DWC3_TRB_NUM;
1870 trb = &dep->trb_pool[slot];
72246da4 1871
e5ba5ec8
PA
1872 ret = __dwc3_cleanup_done_trbs(dwc, dep, req, trb,
1873 event, status);
1874 if (ret)
1875 break;
1876 		} while (++i < req->request.num_mapped_sgs);
72246da4 1877
72246da4 1878 dwc3_gadget_giveback(dep, req, status);
e5ba5ec8
PA
1879
1880 if (ret)
72246da4
FB
1881 break;
1882 } while (1);
1883
cdc359dd
PA
1884 if (usb_endpoint_xfer_isoc(dep->endpoint.desc) &&
1885 list_empty(&dep->req_queued)) {
1886 if (list_empty(&dep->request_list)) {
1887 /*
1888 * If there is no entry in request list then do
1889 * not issue END TRANSFER now. Just set PENDING
1890 * flag, so that END TRANSFER is issued when an
1891 * entry is added into request list.
1892 */
1893 dep->flags = DWC3_EP_PENDING_REQUEST;
1894 } else {
b992e681 1895 dwc3_stop_active_transfer(dwc, dep->number, true);
cdc359dd
PA
1896 dep->flags = DWC3_EP_ENABLED;
1897 }
7efea86c
PA
1898 return 1;
1899 }
1900
72246da4
FB
1901 return 1;
1902}
1903
1904static void dwc3_endpoint_transfer_complete(struct dwc3 *dwc,
029d97ff 1905 struct dwc3_ep *dep, const struct dwc3_event_depevt *event)
72246da4
FB
1906{
1907 unsigned status = 0;
1908 int clean_busy;
1909
1910 if (event->status & DEPEVT_STATUS_BUSERR)
1911 status = -ECONNRESET;
1912
1d046793 1913 clean_busy = dwc3_cleanup_done_reqs(dwc, dep, event, status);
c2df85ca 1914 if (clean_busy)
72246da4 1915 dep->flags &= ~DWC3_EP_BUSY;
fae2b904
FB
1916
1917 /*
1918 * WORKAROUND: This is the 2nd half of U1/U2 -> U0 workaround.
1919 * See dwc3_gadget_linksts_change_interrupt() for 1st half.
1920 */
1921 if (dwc->revision < DWC3_REVISION_183A) {
1922 u32 reg;
1923 int i;
1924
1925 for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
348e026f 1926 dep = dwc->eps[i];
fae2b904
FB
1927
1928 if (!(dep->flags & DWC3_EP_ENABLED))
1929 continue;
1930
1931 if (!list_empty(&dep->req_queued))
1932 return;
1933 }
1934
1935 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
1936 reg |= dwc->u1u2;
1937 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
1938
1939 dwc->u1u2 = 0;
1940 }
72246da4
FB
1941}
1942
72246da4
FB
1943static void dwc3_endpoint_interrupt(struct dwc3 *dwc,
1944 const struct dwc3_event_depevt *event)
1945{
1946 struct dwc3_ep *dep;
1947 u8 epnum = event->endpoint_number;
1948
1949 dep = dwc->eps[epnum];
1950
3336abb5
FB
1951 if (!(dep->flags & DWC3_EP_ENABLED))
1952 return;
1953
72246da4
FB
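	/*
	 * Physical endpoints 0 and 1 together form the control endpoint;
	 * hand their events to the dedicated ep0 handler.
	 */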
1954 if (epnum == 0 || epnum == 1) {
1955 dwc3_ep0_interrupt(dwc, event);
1956 return;
1957 }
1958
1959 switch (event->endpoint_event) {
1960 case DWC3_DEPEVT_XFERCOMPLETE:
b4996a86 1961 dep->resource_index = 0;
c2df85ca 1962
16e78db7 1963 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
72246da4
FB
1964 dev_dbg(dwc->dev, "%s is an Isochronous endpoint\n",
1965 dep->name);
1966 return;
1967 }
1968
029d97ff 1969 dwc3_endpoint_transfer_complete(dwc, dep, event);
72246da4
FB
1970 break;
1971 case DWC3_DEPEVT_XFERINPROGRESS:
029d97ff 1972 dwc3_endpoint_transfer_complete(dwc, dep, event);
72246da4
FB
1973 break;
1974 case DWC3_DEPEVT_XFERNOTREADY:
16e78db7 1975 if (usb_endpoint_xfer_isoc(dep->endpoint.desc)) {
72246da4
FB
1976 dwc3_gadget_start_isoc(dwc, dep, event);
1977 } else {
1978 int ret;
1979
1980 dev_vdbg(dwc->dev, "%s: reason %s\n",
40aa41fb
FB
1981 dep->name, event->status &
1982 DEPEVT_STATUS_TRANSFER_ACTIVE
72246da4
FB
1983 ? "Transfer Active"
1984 : "Transfer Not Active");
1985
1986 ret = __dwc3_gadget_kick_transfer(dep, 0, 1);
1987 if (!ret || ret == -EBUSY)
1988 return;
1989
1990 dev_dbg(dwc->dev, "%s: failed to kick transfers\n",
1991 dep->name);
1992 }
1993
879631aa
FB
1994 break;
1995 case DWC3_DEPEVT_STREAMEVT:
16e78db7 1996 if (!usb_endpoint_xfer_bulk(dep->endpoint.desc)) {
879631aa
FB
1997 dev_err(dwc->dev, "Stream event for non-Bulk %s\n",
1998 dep->name);
1999 return;
2000 }
2001
2002 switch (event->status) {
2003 case DEPEVT_STREAMEVT_FOUND:
2004 dev_vdbg(dwc->dev, "Stream %d found and started\n",
2005 event->parameters);
2006
2007 break;
2008 case DEPEVT_STREAMEVT_NOTFOUND:
2009 /* FALLTHROUGH */
2010 default:
2011 dev_dbg(dwc->dev, "Couldn't find suitable stream\n");
2012 }
72246da4
FB
2013 break;
2014 case DWC3_DEPEVT_RXTXFIFOEVT:
2015 dev_dbg(dwc->dev, "%s FIFO Overrun\n", dep->name);
2016 break;
72246da4 2017 case DWC3_DEPEVT_EPCMDCMPLT:
ea53b882 2018 dev_vdbg(dwc->dev, "Endpoint Command Complete\n");
72246da4
FB
2019 break;
2020 }
2021}
2022
2023static void dwc3_disconnect_gadget(struct dwc3 *dwc)
2024{
2025 if (dwc->gadget_driver && dwc->gadget_driver->disconnect) {
2026 spin_unlock(&dwc->lock);
2027 dwc->gadget_driver->disconnect(&dwc->gadget);
2028 spin_lock(&dwc->lock);
2029 }
2030}
2031
bc5ba2e0
FB
2032static void dwc3_suspend_gadget(struct dwc3 *dwc)
2033{
73a30bfc 2034 if (dwc->gadget_driver && dwc->gadget_driver->suspend) {
bc5ba2e0
FB
2035 spin_unlock(&dwc->lock);
2036 dwc->gadget_driver->suspend(&dwc->gadget);
2037 spin_lock(&dwc->lock);
2038 }
2039}
2040
2041static void dwc3_resume_gadget(struct dwc3 *dwc)
2042{
73a30bfc 2043 if (dwc->gadget_driver && dwc->gadget_driver->resume) {
bc5ba2e0
FB
2044 spin_unlock(&dwc->lock);
2045 dwc->gadget_driver->resume(&dwc->gadget);
8e74475b
FB
2046 }
2047}
2048
2049static void dwc3_reset_gadget(struct dwc3 *dwc)
2050{
2051 if (!dwc->gadget_driver)
2052 return;
2053
2054 if (dwc->gadget.speed != USB_SPEED_UNKNOWN) {
2055 spin_unlock(&dwc->lock);
2056 usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver);
bc5ba2e0
FB
2057 spin_lock(&dwc->lock);
2058 }
2059}
2060
b992e681 2061static void dwc3_stop_active_transfer(struct dwc3 *dwc, u32 epnum, bool force)
72246da4
FB
2062{
2063 struct dwc3_ep *dep;
2064 struct dwc3_gadget_ep_cmd_params params;
2065 u32 cmd;
2066 int ret;
2067
2068 dep = dwc->eps[epnum];
2069
b4996a86 2070 if (!dep->resource_index)
3daf74d7
PA
2071 return;
2072
57911504
PA
2073 /*
2074 * NOTICE: We are violating what the Databook says about the
2075 * EndTransfer command. Ideally we would _always_ wait for the
2076 * EndTransfer Command Completion IRQ, but that's causing too
2077 * much trouble synchronizing between us and gadget driver.
2078 *
2079 * We have discussed this with the IP Provider and it was
2080 * suggested to giveback all requests here, but give HW some
2081 * extra time to synchronize with the interconnect. We're using
2082 	 * an arbitrary 100us delay for that.
2083 *
2084 * Note also that a similar handling was tested by Synopsys
2085 * (thanks a lot Paul) and nothing bad has come out of it.
2086 * In short, what we're doing is:
2087 *
2088 * - Issue EndTransfer WITH CMDIOC bit set
2089 * - Wait 100us
2090 */
2091
3daf74d7 2092 cmd = DWC3_DEPCMD_ENDTRANSFER;
b992e681
PZ
2093 cmd |= force ? DWC3_DEPCMD_HIPRI_FORCERM : 0;
2094 cmd |= DWC3_DEPCMD_CMDIOC;
b4996a86 2095 cmd |= DWC3_DEPCMD_PARAM(dep->resource_index);
3daf74d7
PA
2096 memset(&params, 0, sizeof(params));
2097 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number, cmd, &params);
2098 WARN_ON_ONCE(ret);
b4996a86 2099 dep->resource_index = 0;
041d81f4 2100 dep->flags &= ~DWC3_EP_BUSY;
57911504 2101 udelay(100);
72246da4
FB
2102}
2103
2104static void dwc3_stop_active_transfers(struct dwc3 *dwc)
2105{
2106 u32 epnum;
2107
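	/* eps[0] and eps[1] are ep0 and handled separately, so start at 2 */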
2108 for (epnum = 2; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2109 struct dwc3_ep *dep;
2110
2111 dep = dwc->eps[epnum];
6a1e3ef4
FB
2112 if (!dep)
2113 continue;
2114
72246da4
FB
2115 if (!(dep->flags & DWC3_EP_ENABLED))
2116 continue;
2117
624407f9 2118 dwc3_remove_requests(dwc, dep);
72246da4
FB
2119 }
2120}
2121
2122static void dwc3_clear_stall_all_ep(struct dwc3 *dwc)
2123{
2124 u32 epnum;
2125
2126 for (epnum = 1; epnum < DWC3_ENDPOINTS_NUM; epnum++) {
2127 struct dwc3_ep *dep;
2128 struct dwc3_gadget_ep_cmd_params params;
2129 int ret;
2130
2131 dep = dwc->eps[epnum];
6a1e3ef4
FB
2132 if (!dep)
2133 continue;
72246da4
FB
2134
2135 if (!(dep->flags & DWC3_EP_STALL))
2136 continue;
2137
2138 dep->flags &= ~DWC3_EP_STALL;
2139
2140 memset(&params, 0, sizeof(params));
2141 ret = dwc3_send_gadget_ep_cmd(dwc, dep->number,
2142 DWC3_DEPCMD_CLEARSTALL, &params);
2143 WARN_ON_ONCE(ret);
2144 }
2145}
2146
2147static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc)
2148{
c4430a26
FB
2149 int reg;
2150
72246da4
FB
2151 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2152 reg &= ~DWC3_DCTL_INITU1ENA;
2153 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2154
2155 reg &= ~DWC3_DCTL_INITU2ENA;
2156 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
72246da4 2157
72246da4 2158 dwc3_disconnect_gadget(dwc);
b23c8439 2159 dwc->start_config_issued = false;
72246da4
FB
2160
2161 dwc->gadget.speed = USB_SPEED_UNKNOWN;
df62df56 2162 dwc->setup_packet_pending = false;
06a374ed 2163 usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED);
72246da4
FB
2164}
2165
72246da4
FB
2166static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc)
2167{
2168 u32 reg;
2169
df62df56
FB
2170 /*
2171 * WORKAROUND: DWC3 revisions <1.88a have an issue which
2172 * would cause a missing Disconnect Event if there's a
2173 * pending Setup Packet in the FIFO.
2174 *
2175 * There's no suggested workaround on the official Bug
2176 * report, which states that "unless the driver/application
2177 * is doing any special handling of a disconnect event,
2178 * there is no functional issue".
2179 *
2180 * Unfortunately, it turns out that we _do_ some special
2181 * handling of a disconnect event, namely complete all
2182 * pending transfers, notify gadget driver of the
2183 * disconnection, and so on.
2184 *
2185 * Our suggested workaround is to follow the Disconnect
2186 * Event steps here, instead, based on a setup_packet_pending
2187 * flag. Such flag gets set whenever we have a XferNotReady
2188 * event on EP0 and gets cleared on XferComplete for the
2189 * same endpoint.
2190 *
2191 * Refers to:
2192 *
2193 * STAR#9000466709: RTL: Device : Disconnect event not
2194 * generated if setup packet pending in FIFO
2195 */
2196 if (dwc->revision < DWC3_REVISION_188A) {
2197 if (dwc->setup_packet_pending)
2198 dwc3_gadget_disconnect_interrupt(dwc);
2199 }
2200
8e74475b 2201 dwc3_reset_gadget(dwc);
72246da4
FB
2202
2203 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2204 reg &= ~DWC3_DCTL_TSTCTRL_MASK;
2205 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
3b637367 2206 dwc->test_mode = false;
72246da4
FB
2207
2208 dwc3_stop_active_transfers(dwc);
2209 dwc3_clear_stall_all_ep(dwc);
b23c8439 2210 dwc->start_config_issued = false;
72246da4
FB
2211
2212 /* Reset device address to zero */
2213 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2214 reg &= ~(DWC3_DCFG_DEVADDR_MASK);
2215 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
72246da4
FB
2216}
2217
2218static void dwc3_update_ram_clk_sel(struct dwc3 *dwc, u32 speed)
2219{
2220 u32 reg;
2221 u32 usb30_clock = DWC3_GCTL_CLK_BUS;
2222
2223 /*
2224 	 * We change the clock only at SS, though it is not clear why.
2225 	 * Maybe it becomes part of the power saving plan.
2226 */
2227
2228 if (speed != DWC3_DSTS_SUPERSPEED)
2229 return;
2230
2231 /*
2232 * RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
2233 * each time on Connect Done.
2234 */
2235 if (!usb30_clock)
2236 return;
2237
2238 reg = dwc3_readl(dwc->regs, DWC3_GCTL);
2239 reg |= DWC3_GCTL_RAMCLKSEL(usb30_clock);
2240 dwc3_writel(dwc->regs, DWC3_GCTL, reg);
2241}
2242
72246da4
FB
2243static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
2244{
72246da4
FB
2245 struct dwc3_ep *dep;
2246 int ret;
2247 u32 reg;
2248 u8 speed;
2249
72246da4
FB
2250 reg = dwc3_readl(dwc->regs, DWC3_DSTS);
2251 speed = reg & DWC3_DSTS_CONNECTSPD;
2252 dwc->speed = speed;
2253
2254 dwc3_update_ram_clk_sel(dwc, speed);
2255
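	/*
	 * Program ep0's wMaxPacketSize and the gadget speed based on the
	 * connection speed reported in DSTS.
	 */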
2256 switch (speed) {
2257 case DWC3_DCFG_SUPERSPEED:
05870c5b
FB
2258 /*
2259 * WORKAROUND: DWC3 revisions <1.90a have an issue which
2260 * would cause a missing USB3 Reset event.
2261 *
2262 * In such situations, we should force a USB3 Reset
2263 * event by calling our dwc3_gadget_reset_interrupt()
2264 * routine.
2265 *
2266 * Refers to:
2267 *
2268 * STAR#9000483510: RTL: SS : USB3 reset event may
2269 * not be generated always when the link enters poll
2270 */
2271 if (dwc->revision < DWC3_REVISION_190A)
2272 dwc3_gadget_reset_interrupt(dwc);
2273
72246da4
FB
2274 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2275 dwc->gadget.ep0->maxpacket = 512;
2276 dwc->gadget.speed = USB_SPEED_SUPER;
2277 break;
2278 case DWC3_DCFG_HIGHSPEED:
2279 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2280 dwc->gadget.ep0->maxpacket = 64;
2281 dwc->gadget.speed = USB_SPEED_HIGH;
2282 break;
2283 case DWC3_DCFG_FULLSPEED2:
2284 case DWC3_DCFG_FULLSPEED1:
2285 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
2286 dwc->gadget.ep0->maxpacket = 64;
2287 dwc->gadget.speed = USB_SPEED_FULL;
2288 break;
2289 case DWC3_DCFG_LOWSPEED:
2290 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8);
2291 dwc->gadget.ep0->maxpacket = 8;
2292 dwc->gadget.speed = USB_SPEED_LOW;
2293 break;
2294 }
2295
2b758350
PA
2296 /* Enable USB2 LPM Capability */
2297
2298 if ((dwc->revision > DWC3_REVISION_194A)
2299 && (speed != DWC3_DCFG_SUPERSPEED)) {
2300 reg = dwc3_readl(dwc->regs, DWC3_DCFG);
2301 reg |= DWC3_DCFG_LPM_CAP;
2302 dwc3_writel(dwc->regs, DWC3_DCFG, reg);
2303
2304 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2305 reg &= ~(DWC3_DCTL_HIRD_THRES_MASK | DWC3_DCTL_L1_HIBER_EN);
2306
460d098c 2307 reg |= DWC3_DCTL_HIRD_THRES(dwc->hird_threshold);
2b758350 2308
80caf7d2
HR
2309 /*
2310 		 * When dwc3 revisions >= 2.40a, LPM Erratum is enabled, and
2311 		 * DCFG.LPMCap is set, the core responds with an ACK if the
2312 		 * BESL value in the LPM token is less than or equal to the
2313 		 * LPM NYET threshold.
2314 */
2315 WARN_ONCE(dwc->revision < DWC3_REVISION_240A
2316 && dwc->has_lpm_erratum,
2317 			"LPM Erratum not available on dwc3 revisions < 2.40a\n");
2318
2319 if (dwc->has_lpm_erratum && dwc->revision >= DWC3_REVISION_240A)
2320 reg |= DWC3_DCTL_LPM_ERRATA(dwc->lpm_nyet_threshold);
2321
356363bf
FB
2322 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2323 } else {
2324 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2325 reg &= ~DWC3_DCTL_HIRD_THRES_MASK;
2b758350
PA
2326 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2327 }
2328
72246da4 2329 dep = dwc->eps[0];
265b70a7
PZ
2330 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2331 false);
72246da4
FB
2332 if (ret) {
2333 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2334 return;
2335 }
2336
2337 dep = dwc->eps[1];
265b70a7
PZ
2338 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, true,
2339 false);
72246da4
FB
2340 if (ret) {
2341 dev_err(dwc->dev, "failed to enable %s\n", dep->name);
2342 return;
2343 }
2344
2345 /*
2346 * Configure PHY via GUSB3PIPECTLn if required.
2347 *
2348 * Update GTXFIFOSIZn
2349 *
2350 * In both cases reset values should be sufficient.
2351 */
2352}
2353
2354static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc)
2355{
72246da4
FB
2356 /*
2357 * TODO take core out of low power mode when that's
2358 * implemented.
2359 */
2360
2361 dwc->gadget_driver->resume(&dwc->gadget);
2362}
2363
2364static void dwc3_gadget_linksts_change_interrupt(struct dwc3 *dwc,
2365 unsigned int evtinfo)
2366{
fae2b904 2367 enum dwc3_link_state next = evtinfo & DWC3_LINK_STATE_MASK;
0b0cc1cd
FB
2368 unsigned int pwropt;
2369
2370 /*
2371 * WORKAROUND: DWC3 < 2.50a have an issue when configured without
2372 * Hibernation mode enabled which would show up when device detects
2373 * host-initiated U3 exit.
2374 *
2375 * In that case, device will generate a Link State Change Interrupt
2376 * from U3 to RESUME which is only necessary if Hibernation is
2377 * configured in.
2378 *
2379 * There are no functional changes due to such spurious event and we
2380 * just need to ignore it.
2381 *
2382 * Refers to:
2383 *
2384 * STAR#9000570034 RTL: SS Resume event generated in non-Hibernation
2385 * operational mode
2386 */
2387 pwropt = DWC3_GHWPARAMS1_EN_PWROPT(dwc->hwparams.hwparams1);
2388 if ((dwc->revision < DWC3_REVISION_250A) &&
2389 (pwropt != DWC3_GHWPARAMS1_EN_PWROPT_HIB)) {
2390 if ((dwc->link_state == DWC3_LINK_STATE_U3) &&
2391 (next == DWC3_LINK_STATE_RESUME)) {
2392 dev_vdbg(dwc->dev, "ignoring transition U3 -> Resume\n");
2393 return;
2394 }
2395 }
fae2b904
FB
2396
2397 /*
2398 	 * WORKAROUND: on DWC3 Revisions <1.83a, depending on the link
2399 	 * partner, the USB session might do multiple entry/exit
2400 	 * of low power states before a transfer takes place.
2401 *
2402 * Due to this problem, we might experience lower throughput. The
2403 * suggested workaround is to disable DCTL[12:9] bits if we're
2404 * transitioning from U1/U2 to U0 and enable those bits again
2405 * after a transfer completes and there are no pending transfers
2406 * on any of the enabled endpoints.
2407 *
2408 * This is the first half of that workaround.
2409 *
2410 * Refers to:
2411 *
2412 * STAR#9000446952: RTL: Device SS : if U1/U2 ->U0 takes >128us
2413 * core send LGO_Ux entering U0
2414 */
2415 if (dwc->revision < DWC3_REVISION_183A) {
2416 if (next == DWC3_LINK_STATE_U0) {
2417 u32 u1u2;
2418 u32 reg;
2419
2420 switch (dwc->link_state) {
2421 case DWC3_LINK_STATE_U1:
2422 case DWC3_LINK_STATE_U2:
2423 reg = dwc3_readl(dwc->regs, DWC3_DCTL);
2424 u1u2 = reg & (DWC3_DCTL_INITU2ENA
2425 | DWC3_DCTL_ACCEPTU2ENA
2426 | DWC3_DCTL_INITU1ENA
2427 | DWC3_DCTL_ACCEPTU1ENA);
2428
2429 if (!dwc->u1u2)
2430 dwc->u1u2 = reg & u1u2;
2431
2432 reg &= ~u1u2;
2433
2434 dwc3_writel(dwc->regs, DWC3_DCTL, reg);
2435 break;
2436 default:
2437 /* do nothing */
2438 break;
2439 }
2440 }
2441 }
2442
bc5ba2e0
FB
2443 switch (next) {
2444 case DWC3_LINK_STATE_U1:
2445 if (dwc->speed == USB_SPEED_SUPER)
2446 dwc3_suspend_gadget(dwc);
2447 break;
2448 case DWC3_LINK_STATE_U2:
2449 case DWC3_LINK_STATE_U3:
2450 dwc3_suspend_gadget(dwc);
2451 break;
2452 case DWC3_LINK_STATE_RESUME:
2453 dwc3_resume_gadget(dwc);
2454 break;
2455 default:
2456 /* do nothing */
2457 break;
2458 }
2459
e57ebc1d 2460 dwc->link_state = next;
72246da4
FB
2461}
2462
e1dadd3b
FB
2463static void dwc3_gadget_hibernation_interrupt(struct dwc3 *dwc,
2464 unsigned int evtinfo)
2465{
2466 unsigned int is_ss = evtinfo & BIT(4);
2467
2468 	/*
2469 	 * WORKAROUND: DWC3 revision 2.20a with hibernation support
2470 	 * has a known issue which can cause USB CV TD.9.23 to fail
2471 	 * randomly.
2472 *
2473 * Because of this issue, core could generate bogus hibernation
2474 * events which SW needs to ignore.
2475 *
2476 * Refers to:
2477 *
2478 * STAR#9000546576: Device Mode Hibernation: Issue in USB 2.0
2479 * Device Fallback from SuperSpeed
2480 */
2481 if (is_ss ^ (dwc->speed == USB_SPEED_SUPER))
2482 return;
2483
2484 /* enter hibernation here */
2485}
2486
72246da4
FB
2487static void dwc3_gadget_interrupt(struct dwc3 *dwc,
2488 const struct dwc3_event_devt *event)
2489{
2490 switch (event->type) {
2491 case DWC3_DEVICE_EVENT_DISCONNECT:
2492 dwc3_gadget_disconnect_interrupt(dwc);
2493 break;
2494 case DWC3_DEVICE_EVENT_RESET:
2495 dwc3_gadget_reset_interrupt(dwc);
2496 break;
2497 case DWC3_DEVICE_EVENT_CONNECT_DONE:
2498 dwc3_gadget_conndone_interrupt(dwc);
2499 break;
2500 case DWC3_DEVICE_EVENT_WAKEUP:
2501 dwc3_gadget_wakeup_interrupt(dwc);
2502 break;
e1dadd3b
FB
2503 case DWC3_DEVICE_EVENT_HIBER_REQ:
2504 if (dev_WARN_ONCE(dwc->dev, !dwc->has_hibernation,
2505 "unexpected hibernation event\n"))
2506 break;
2507
2508 dwc3_gadget_hibernation_interrupt(dwc, event->event_info);
2509 break;
72246da4
FB
2510 case DWC3_DEVICE_EVENT_LINK_STATUS_CHANGE:
2511 dwc3_gadget_linksts_change_interrupt(dwc, event->event_info);
2512 break;
2513 case DWC3_DEVICE_EVENT_EOPF:
2514 dev_vdbg(dwc->dev, "End of Periodic Frame\n");
2515 break;
2516 case DWC3_DEVICE_EVENT_SOF:
2517 dev_vdbg(dwc->dev, "Start of Periodic Frame\n");
2518 break;
2519 case DWC3_DEVICE_EVENT_ERRATIC_ERROR:
2520 dev_vdbg(dwc->dev, "Erratic Error\n");
2521 break;
2522 case DWC3_DEVICE_EVENT_CMD_CMPL:
2523 dev_vdbg(dwc->dev, "Command Complete\n");
2524 break;
2525 case DWC3_DEVICE_EVENT_OVERFLOW:
2526 dev_vdbg(dwc->dev, "Overflow\n");
2527 break;
2528 default:
2529 dev_dbg(dwc->dev, "UNKNOWN IRQ %d\n", event->type);
2530 }
2531}
2532
2533static void dwc3_process_event_entry(struct dwc3 *dwc,
2534 const union dwc3_event *event)
2535{
2c4cbe6e
FB
2536 trace_dwc3_event(event->raw);
2537
72246da4
FB
2538 /* Endpoint IRQ, handle it and return early */
2539 if (event->type.is_devspec == 0) {
2540 /* depevt */
2541 return dwc3_endpoint_interrupt(dwc, &event->depevt);
2542 }
2543
2544 switch (event->type.type) {
2545 case DWC3_EVENT_TYPE_DEV:
2546 dwc3_gadget_interrupt(dwc, &event->devt);
2547 break;
2548 /* REVISIT what to do with Carkit and I2C events ? */
2549 default:
2550 dev_err(dwc->dev, "UNKNOWN IRQ type %d\n", event->raw);
2551 }
2552}
2553
f42f2447 2554static irqreturn_t dwc3_process_event_buf(struct dwc3 *dwc, u32 buf)
b15a762f 2555{
f42f2447 2556 struct dwc3_event_buffer *evt;
b15a762f 2557 irqreturn_t ret = IRQ_NONE;
f42f2447 2558 int left;
e8adfc30 2559 u32 reg;
b15a762f 2560
f42f2447
FB
2561 evt = dwc->ev_buffs[buf];
2562 left = evt->count;
b15a762f 2563
f42f2447
FB
2564 if (!(evt->flags & DWC3_EVENT_PENDING))
2565 return IRQ_NONE;
b15a762f 2566
f42f2447
FB
2567 while (left > 0) {
2568 union dwc3_event event;
b15a762f 2569
f42f2447 2570 event.raw = *(u32 *) (evt->buf + evt->lpos);
b15a762f 2571
f42f2447 2572 dwc3_process_event_entry(dwc, &event);
b15a762f 2573
f42f2447
FB
2574 /*
2575 * FIXME we wrap around correctly to the next entry as
2576 * almost all entries are 4 bytes in size. There is one
2577 		 * entry which is 12 bytes: a regular entry
2578 		 * followed by 8 bytes of data. ATM I don't know how
2579 		 * things are organized if we get next to a
2580 		 * boundary, so I'll worry about that once we try to
2581 		 * handle that case.
2582 */
2583 evt->lpos = (evt->lpos + 4) % DWC3_EVENT_BUFFERS_SIZE;
2584 left -= 4;
b15a762f 2585
f42f2447
FB
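		/*
		 * Writing the number of consumed bytes to GEVNTCOUNT lets
		 * the controller reclaim that space in the event buffer.
		 */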
2586 dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(buf), 4);
2587 }
b15a762f 2588
f42f2447
FB
2589 evt->count = 0;
2590 evt->flags &= ~DWC3_EVENT_PENDING;
2591 ret = IRQ_HANDLED;
b15a762f 2592
f42f2447
FB
2593 /* Unmask interrupt */
2594 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2595 reg &= ~DWC3_GEVNTSIZ_INTMASK;
2596 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
b15a762f 2597
f42f2447
FB
2598 return ret;
2599}
e8adfc30 2600
f42f2447
FB
2601static irqreturn_t dwc3_thread_interrupt(int irq, void *_dwc)
2602{
2603 struct dwc3 *dwc = _dwc;
2604 unsigned long flags;
2605 irqreturn_t ret = IRQ_NONE;
2606 int i;
2607
2608 spin_lock_irqsave(&dwc->lock, flags);
2609
2610 for (i = 0; i < dwc->num_event_buffers; i++)
2611 ret |= dwc3_process_event_buf(dwc, i);
b15a762f
FB
2612
2613 spin_unlock_irqrestore(&dwc->lock, flags);
2614
2615 return ret;
2616}
2617
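/*
 * Hard IRQ half: latch the number of pending event bytes, mask this
 * event buffer's interrupt and defer the actual processing to the
 * threaded handler via IRQ_WAKE_THREAD.
 */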
7f97aa98 2618static irqreturn_t dwc3_check_event_buf(struct dwc3 *dwc, u32 buf)
72246da4
FB
2619{
2620 struct dwc3_event_buffer *evt;
72246da4 2621 u32 count;
e8adfc30 2622 u32 reg;
72246da4 2623
b15a762f
FB
2624 evt = dwc->ev_buffs[buf];
2625
72246da4
FB
2626 count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(buf));
2627 count &= DWC3_GEVNTCOUNT_MASK;
2628 if (!count)
2629 return IRQ_NONE;
2630
b15a762f
FB
2631 evt->count = count;
2632 evt->flags |= DWC3_EVENT_PENDING;
72246da4 2633
e8adfc30
FB
2634 /* Mask interrupt */
2635 reg = dwc3_readl(dwc->regs, DWC3_GEVNTSIZ(buf));
2636 reg |= DWC3_GEVNTSIZ_INTMASK;
2637 dwc3_writel(dwc->regs, DWC3_GEVNTSIZ(buf), reg);
2638
b15a762f 2639 return IRQ_WAKE_THREAD;
72246da4
FB
2640}
2641
2642static irqreturn_t dwc3_interrupt(int irq, void *_dwc)
2643{
2644 struct dwc3 *dwc = _dwc;
2645 int i;
2646 irqreturn_t ret = IRQ_NONE;
2647
2648 spin_lock(&dwc->lock);
2649
9f622b2a 2650 for (i = 0; i < dwc->num_event_buffers; i++) {
72246da4
FB
2651 irqreturn_t status;
2652
7f97aa98 2653 status = dwc3_check_event_buf(dwc, i);
b15a762f 2654 if (status == IRQ_WAKE_THREAD)
72246da4
FB
2655 ret = status;
2656 }
2657
2658 spin_unlock(&dwc->lock);
2659
2660 return ret;
2661}
2662
2663/**
2664 * dwc3_gadget_init - Initializes gadget related registers
1d046793 2665 * @dwc: pointer to our controller context structure
72246da4
FB
2666 *
2667 * Returns 0 on success otherwise negative errno.
2668 */
41ac7b3a 2669int dwc3_gadget_init(struct dwc3 *dwc)
72246da4 2670{
72246da4 2671 int ret;
72246da4
FB
2672
2673 dwc->ctrl_req = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2674 &dwc->ctrl_req_addr, GFP_KERNEL);
2675 if (!dwc->ctrl_req) {
2676 dev_err(dwc->dev, "failed to allocate ctrl request\n");
2677 ret = -ENOMEM;
2678 goto err0;
2679 }
2680
2681 dwc->ep0_trb = dma_alloc_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2682 &dwc->ep0_trb_addr, GFP_KERNEL);
2683 if (!dwc->ep0_trb) {
2684 dev_err(dwc->dev, "failed to allocate ep0 trb\n");
2685 ret = -ENOMEM;
2686 goto err1;
2687 }
2688
3ef35faf 2689 dwc->setup_buf = kzalloc(DWC3_EP0_BOUNCE_SIZE, GFP_KERNEL);
72246da4 2690 if (!dwc->setup_buf) {
72246da4
FB
2691 ret = -ENOMEM;
2692 goto err2;
2693 }
2694
5812b1c2 2695 dwc->ep0_bounce = dma_alloc_coherent(dwc->dev,
3ef35faf
FB
2696 DWC3_EP0_BOUNCE_SIZE, &dwc->ep0_bounce_addr,
2697 GFP_KERNEL);
5812b1c2
FB
2698 if (!dwc->ep0_bounce) {
2699 dev_err(dwc->dev, "failed to allocate ep0 bounce buffer\n");
2700 ret = -ENOMEM;
2701 goto err3;
2702 }
2703
72246da4 2704 dwc->gadget.ops = &dwc3_gadget_ops;
d327ab5b 2705 dwc->gadget.max_speed = USB_SPEED_SUPER;
72246da4 2706 dwc->gadget.speed = USB_SPEED_UNKNOWN;
eeb720fb 2707 dwc->gadget.sg_supported = true;
72246da4
FB
2708 dwc->gadget.name = "dwc3-gadget";
2709
a4b9d94b
DC
2710 /*
2711 * Per databook, DWC3 needs buffer size to be aligned to MaxPacketSize
2712 * on ep out.
2713 */
2714 dwc->gadget.quirk_ep_out_aligned_size = true;
2715
72246da4
FB
2716 /*
2717 * REVISIT: Here we should clear all pending IRQs to be
2718 * sure we're starting from a well known location.
2719 */
2720
2721 ret = dwc3_gadget_init_endpoints(dwc);
2722 if (ret)
5812b1c2 2723 goto err4;
72246da4 2724
72246da4
FB
2725 ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget);
2726 if (ret) {
2727 dev_err(dwc->dev, "failed to register udc\n");
e1f80467 2728 goto err4;
72246da4
FB
2729 }
2730
2731 return 0;
2732
5812b1c2 2733err4:
e1f80467 2734 dwc3_gadget_free_endpoints(dwc);
3ef35faf
FB
2735 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2736 dwc->ep0_bounce, dwc->ep0_bounce_addr);
5812b1c2 2737
72246da4 2738err3:
0fc9a1be 2739 kfree(dwc->setup_buf);
72246da4
FB
2740
2741err2:
2742 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2743 dwc->ep0_trb, dwc->ep0_trb_addr);
2744
2745err1:
2746 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2747 dwc->ctrl_req, dwc->ctrl_req_addr);
2748
2749err0:
2750 return ret;
2751}
2752
7415f17c
FB
2753/* -------------------------------------------------------------------------- */
2754
72246da4
FB
2755void dwc3_gadget_exit(struct dwc3 *dwc)
2756{
72246da4 2757 usb_del_gadget_udc(&dwc->gadget);
72246da4 2758
72246da4
FB
2759 dwc3_gadget_free_endpoints(dwc);
2760
3ef35faf
FB
2761 dma_free_coherent(dwc->dev, DWC3_EP0_BOUNCE_SIZE,
2762 dwc->ep0_bounce, dwc->ep0_bounce_addr);
5812b1c2 2763
0fc9a1be 2764 kfree(dwc->setup_buf);
72246da4
FB
2765
2766 dma_free_coherent(dwc->dev, sizeof(*dwc->ep0_trb),
2767 dwc->ep0_trb, dwc->ep0_trb_addr);
2768
2769 dma_free_coherent(dwc->dev, sizeof(*dwc->ctrl_req),
2770 dwc->ctrl_req, dwc->ctrl_req_addr);
72246da4 2771}
7415f17c 2772
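/*
 * Gadget-side suspend: quiesce gadget interrupts if pullups are connected,
 * disable both halves of ep0 and save DCFG so dwc3_gadget_resume() can
 * restore it.
 */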
0b0231aa 2773int dwc3_gadget_suspend(struct dwc3 *dwc)
7415f17c 2774{
7b2a0368 2775 if (dwc->pullups_connected) {
7415f17c 2776 dwc3_gadget_disable_irq(dwc);
7b2a0368
FB
2777 dwc3_gadget_run_stop(dwc, true, true);
2778 }
7415f17c 2779
7415f17c
FB
2780 __dwc3_gadget_ep_disable(dwc->eps[0]);
2781 __dwc3_gadget_ep_disable(dwc->eps[1]);
2782
2783 dwc->dcfg = dwc3_readl(dwc->regs, DWC3_DCFG);
2784
2785 return 0;
2786}
2787
2788int dwc3_gadget_resume(struct dwc3 *dwc)
2789{
2790 struct dwc3_ep *dep;
2791 int ret;
2792
2793 /* Start with SuperSpeed Default */
2794 dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
2795
2796 dep = dwc->eps[0];
265b70a7
PZ
2797 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
2798 false);
7415f17c
FB
2799 if (ret)
2800 goto err0;
2801
2802 dep = dwc->eps[1];
265b70a7
PZ
2803 ret = __dwc3_gadget_ep_enable(dep, &dwc3_gadget_ep0_desc, NULL, false,
2804 false);
7415f17c
FB
2805 if (ret)
2806 goto err1;
2807
2808 /* begin to receive SETUP packets */
2809 dwc->ep0state = EP0_SETUP_PHASE;
2810 dwc3_ep0_out_start(dwc);
2811
2812 dwc3_writel(dwc->regs, DWC3_DCFG, dwc->dcfg);
2813
0b0231aa
FB
2814 if (dwc->pullups_connected) {
2815 dwc3_gadget_enable_irq(dwc);
2816 dwc3_gadget_run_stop(dwc, true, false);
2817 }
2818
7415f17c
FB
2819 return 0;
2820
2821err1:
2822 __dwc3_gadget_ep_disable(dwc->eps[0]);
2823
2824err0:
2825 return ret;
2826}