Merge tag 'phy-for-4.12' of git://git.kernel.org/pub/scm/linux/kernel/git/kishon...
[linux-2.6-block.git] / drivers / usb / gadget / udc / amd5536udc.c
CommitLineData
55d402d8
TD
1/*
2 * amd5536.c -- AMD 5536 UDC high/full speed USB device controller
3 *
4 * Copyright (C) 2005-2007 AMD (http://www.amd.com)
5 * Author: Thomas Dahlmann
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
55d402d8
TD
11 */
12
13/*
14 * The AMD5536 UDC is part of the x86 southbridge AMD Geode CS5536.
15 * It is a USB Highspeed DMA capable USB device controller. Beside ep0 it
16 * provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
17 *
18 * Make sure that UDC is assigned to port 4 by BIOS settings (port can also
19 * be used as host port) and UOC bits PAD_EN and APU are set (should be done
20 * by BIOS init).
21 *
22 * UDC DMA requires 32-bit aligned buffers so DMA with gadget ether does not
23 * work without updating NET_IP_ALIGN. Or PIO mode (module param "use_dma=0")
24 * can be used with gadget ether.
25 */
26
27/* debug control */
28/* #define UDC_VERBOSE */
29
30/* Driver strings */
31#define UDC_MOD_DESCRIPTION "AMD 5536 UDC - USB Device Controller"
c15e03e1 32#define UDC_DRIVER_VERSION_STRING "01.00.0206"
55d402d8
TD
33
34/* system */
35#include <linux/module.h>
36#include <linux/pci.h>
37#include <linux/kernel.h>
55d402d8
TD
38#include <linux/delay.h>
39#include <linux/ioport.h>
40#include <linux/sched.h>
41#include <linux/slab.h>
55d402d8 42#include <linux/errno.h>
55d402d8
TD
43#include <linux/timer.h>
44#include <linux/list.h>
45#include <linux/interrupt.h>
46#include <linux/ioctl.h>
47#include <linux/fs.h>
48#include <linux/dmapool.h>
49#include <linux/moduleparam.h>
50#include <linux/device.h>
51#include <linux/io.h>
52#include <linux/irq.h>
b38b03b3 53#include <linux/prefetch.h>
55d402d8
TD
54
55#include <asm/byteorder.h>
55d402d8
TD
56#include <asm/unaligned.h>
57
58/* gadget stack */
59#include <linux/usb/ch9.h>
9454a57a 60#include <linux/usb/gadget.h>
55d402d8
TD
61
62/* udc specific */
63#include "amd5536udc.h"
64
65
/* --- forward declarations (all defined later in this file) --- */
static void udc_tasklet_disconnect(unsigned long);
static void empty_req_queue(struct udc_ep *);
static void udc_setup_endpoints(struct udc *dev);
static void udc_soft_reset(struct udc *dev);
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);

/* description */
static const char mod_desc[] = UDC_MOD_DESCRIPTION;
static const char name[] = "amd5536udc";

/* structure to hold endpoint function pointers */
static const struct usb_ep_ops udc_ep_ops;

/* received setup data */
static union udc_setup_data setup_data;

/* pointer to device object (single-instance driver) */
static struct udc *udc;

/* irq spin lock for soft reset */
static DEFINE_SPINLOCK(udc_irq_spinlock);
/* stall spin lock */
static DEFINE_SPINLOCK(udc_stall_spinlock);
90
/*
 * slave mode: pending bytes in rx fifo after nyet,
 * used if EPIN irq came but no req was available
 */
static unsigned int udc_rxfifo_pending;

/* count soft resets after suspend to avoid loop */
static int soft_reset_occured;
static int soft_reset_after_usbreset_occured;

/* timer */
static struct timer_list udc_timer;
static int stop_timer;

/* set_rde -- Is used to control enabling of RX DMA. Problem is
 * that UDC has only one bit (RDE) to enable/disable RX DMA for
 * all OUT endpoints. So we have to handle race conditions like
 * when OUT data reaches the fifo but no request was queued yet.
 * This cannot be solved by letting the RX DMA disabled until a
 * request gets queued because there may be other OUT packets
 * in the FIFO (important for not blocking control traffic).
 * The value of set_rde controls the corresponding timer.
 *
 * set_rde -1 == not used, means it is allowed to be set to 0 or 1
 * set_rde 0 == do not touch RDE, do not start the RDE timer
 * set_rde 1 == timer function will look whether FIFO has data
 * set_rde 2 == set by timer function to enable RX DMA on next call
 */
static int set_rde = -1;

static DECLARE_COMPLETION(on_exit);
static struct timer_list udc_pollstall_timer;
static int stop_pollstall_timer;
static DECLARE_COMPLETION(on_pollstall_exit);

/* tasklet for usb disconnect */
static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect,
		(unsigned long) &udc);
129
130
/* endpoint names used for print */
static const char ep0_string[] = "ep0in";

/*
 * Static name/capability table for all 32 hardware endpoints
 * (16 IN followed by 16 OUT), indexed by endpoint number.
 * NOTE(review): "ep1in-int" is advertised with BULK-type caps like the
 * other data endpoints; presumably the hardware endpoints accept bulk
 * or interrupt traffic — confirm against the CS5536 databook.
 */
static const struct {
	const char *name;
	const struct usb_ep_caps caps;
} ep_info[] = {
#define EP_INFO(_name, _caps) \
	{ \
		.name = _name, \
		.caps = _caps, \
	}

	EP_INFO(ep0_string,
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep1in-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep2in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep3in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep4in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep5in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep6in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep7in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep8in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep9in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep10in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep11in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep12in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep13in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep14in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep15in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep0out",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep1out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep2out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep3out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep4out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep5out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep6out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep7out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep8out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep9out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep10out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep11out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep12out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep13out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep14out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep15out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),

#undef EP_INFO
};
210
/* DMA usage flag */
static bool use_dma = 1;
/* packet per buffer dma */
static bool use_dma_ppb = 1;
/* with per descr. update */
static bool use_dma_ppb_du;
/* buffer fill mode (no module_param below — compile-time setting only) */
static int use_dma_bufferfill_mode;
/* full speed only mode */
static bool use_fullspeed;
/* tx buffer size for high speed (no module_param below) */
static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;

/* module parameters (all read-only via sysfs, S_IRUGO) */
module_param(use_dma, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma, "true for DMA");
module_param(use_dma_ppb, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma_ppb, "true for DMA in packet per buffer mode");
module_param(use_dma_ppb_du, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma_ppb_du,
	"true for DMA in packet per buffer mode with descriptor update");
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
234
235/*---------------------------------------------------------------------------*/
236/* Prints UDC device registers and endpoint irq registers */
237static void print_regs(struct udc *dev)
238{
239 DBG(dev, "------- Device registers -------\n");
240 DBG(dev, "dev config = %08x\n", readl(&dev->regs->cfg));
241 DBG(dev, "dev control = %08x\n", readl(&dev->regs->ctl));
242 DBG(dev, "dev status = %08x\n", readl(&dev->regs->sts));
243 DBG(dev, "\n");
244 DBG(dev, "dev int's = %08x\n", readl(&dev->regs->irqsts));
245 DBG(dev, "dev intmask = %08x\n", readl(&dev->regs->irqmsk));
246 DBG(dev, "\n");
247 DBG(dev, "dev ep int's = %08x\n", readl(&dev->regs->ep_irqsts));
248 DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk));
249 DBG(dev, "\n");
250 DBG(dev, "USE DMA = %d\n", use_dma);
251 if (use_dma && use_dma_ppb && !use_dma_ppb_du) {
252 DBG(dev, "DMA mode = PPBNDU (packet per buffer "
253 "WITHOUT desc. update)\n");
254 dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBNDU");
0cf7a633 255 } else if (use_dma && use_dma_ppb && use_dma_ppb_du) {
55d402d8
TD
256 DBG(dev, "DMA mode = PPBDU (packet per buffer "
257 "WITH desc. update)\n");
258 dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBDU");
259 }
260 if (use_dma && use_dma_bufferfill_mode) {
261 DBG(dev, "DMA mode = BF (buffer fill mode)\n");
262 dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "BF");
263 }
170b778f 264 if (!use_dma)
55d402d8 265 dev_info(&dev->pdev->dev, "FIFO mode\n");
55d402d8
TD
266 DBG(dev, "-------------------------------------------------------\n");
267}
268
269/* Masks unused interrupts */
270static int udc_mask_unused_interrupts(struct udc *dev)
271{
272 u32 tmp;
273
274 /* mask all dev interrupts */
275 tmp = AMD_BIT(UDC_DEVINT_SVC) |
276 AMD_BIT(UDC_DEVINT_ENUM) |
277 AMD_BIT(UDC_DEVINT_US) |
278 AMD_BIT(UDC_DEVINT_UR) |
279 AMD_BIT(UDC_DEVINT_ES) |
280 AMD_BIT(UDC_DEVINT_SI) |
281 AMD_BIT(UDC_DEVINT_SOF)|
282 AMD_BIT(UDC_DEVINT_SC);
283 writel(tmp, &dev->regs->irqmsk);
284
285 /* mask all ep interrupts */
286 writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk);
287
288 return 0;
289}
290
291/* Enables endpoint 0 interrupts */
292static int udc_enable_ep0_interrupts(struct udc *dev)
293{
294 u32 tmp;
295
296 DBG(dev, "udc_enable_ep0_interrupts()\n");
297
298 /* read irq mask */
299 tmp = readl(&dev->regs->ep_irqmsk);
300 /* enable ep0 irq's */
301 tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0)
302 & AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0);
303 writel(tmp, &dev->regs->ep_irqmsk);
304
305 return 0;
306}
307
308/* Enables device interrupts for SET_INTF and SET_CONFIG */
309static int udc_enable_dev_setup_interrupts(struct udc *dev)
310{
311 u32 tmp;
312
313 DBG(dev, "enable device interrupts for setup data\n");
314
315 /* read irq mask */
316 tmp = readl(&dev->regs->irqmsk);
317
318 /* enable SET_INTERFACE, SET_CONFIG and other needed irq's */
319 tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI)
320 & AMD_UNMASK_BIT(UDC_DEVINT_SC)
321 & AMD_UNMASK_BIT(UDC_DEVINT_UR)
322 & AMD_UNMASK_BIT(UDC_DEVINT_SVC)
323 & AMD_UNMASK_BIT(UDC_DEVINT_ENUM);
324 writel(tmp, &dev->regs->irqmsk);
325
326 return 0;
327}
328
25985edc 329/* Calculates fifo start of endpoint based on preceding endpoints */
55d402d8
TD
330static int udc_set_txfifo_addr(struct udc_ep *ep)
331{
332 struct udc *dev;
333 u32 tmp;
334 int i;
335
336 if (!ep || !(ep->in))
337 return -EINVAL;
338
339 dev = ep->dev;
340 ep->txfifo = dev->txfifo;
341
342 /* traverse ep's */
343 for (i = 0; i < ep->num; i++) {
344 if (dev->ep[i].regs) {
345 /* read fifo size */
346 tmp = readl(&dev->ep[i].regs->bufin_framenum);
347 tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE);
348 ep->txfifo += tmp;
349 }
350 }
351 return 0;
352}
353
354/* CNAK pending field: bit0 = ep0in, bit16 = ep0out */
355static u32 cnak_pending;
356
357static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num)
358{
359 if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) {
360 DBG(ep->dev, "NAK could not be cleared for ep%d\n", num);
361 cnak_pending |= 1 << (num);
362 ep->naking = 1;
363 } else
364 cnak_pending = cnak_pending & (~(1 << (num)));
365}
366
367
368/* Enables endpoint, is called by gadget driver */
369static int
370udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
371{
372 struct udc_ep *ep;
373 struct udc *dev;
374 u32 tmp;
375 unsigned long iflags;
376 u8 udc_csr_epix;
fd05e720 377 unsigned maxpacket;
55d402d8
TD
378
379 if (!usbep
380 || usbep->name == ep0_string
381 || !desc
382 || desc->bDescriptorType != USB_DT_ENDPOINT)
383 return -EINVAL;
384
385 ep = container_of(usbep, struct udc_ep, ep);
386 dev = ep->dev;
387
388 DBG(dev, "udc_ep_enable() ep %d\n", ep->num);
389
390 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
391 return -ESHUTDOWN;
392
393 spin_lock_irqsave(&dev->lock, iflags);
ef20a72b 394 ep->ep.desc = desc;
55d402d8
TD
395
396 ep->halted = 0;
397
398 /* set traffic type */
399 tmp = readl(&dev->ep[ep->num].regs->ctl);
400 tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET);
401 writel(tmp, &dev->ep[ep->num].regs->ctl);
402
403 /* set max packet size */
29cc8897 404 maxpacket = usb_endpoint_maxp(desc);
55d402d8 405 tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
fd05e720
AV
406 tmp = AMD_ADDBITS(tmp, maxpacket, UDC_EP_MAX_PKT_SIZE);
407 ep->ep.maxpacket = maxpacket;
55d402d8
TD
408 writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);
409
410 /* IN ep */
411 if (ep->in) {
412
413 /* ep ix in UDC CSR register space */
414 udc_csr_epix = ep->num;
415
416 /* set buffer size (tx fifo entries) */
417 tmp = readl(&dev->ep[ep->num].regs->bufin_framenum);
418 /* double buffering: fifo size = 2 x max packet size */
419 tmp = AMD_ADDBITS(
420 tmp,
fd05e720
AV
421 maxpacket * UDC_EPIN_BUFF_SIZE_MULT
422 / UDC_DWORD_BYTES,
55d402d8
TD
423 UDC_EPIN_BUFF_SIZE);
424 writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);
425
426 /* calc. tx fifo base addr */
427 udc_set_txfifo_addr(ep);
428
429 /* flush fifo */
430 tmp = readl(&ep->regs->ctl);
431 tmp |= AMD_BIT(UDC_EPCTL_F);
432 writel(tmp, &ep->regs->ctl);
433
434 /* OUT ep */
435 } else {
436 /* ep ix in UDC CSR register space */
437 udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
438
439 /* set max packet size UDC CSR */
440 tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
fd05e720 441 tmp = AMD_ADDBITS(tmp, maxpacket,
55d402d8
TD
442 UDC_CSR_NE_MAX_PKT);
443 writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
444
445 if (use_dma && !ep->in) {
446 /* alloc and init BNA dummy request */
447 ep->bna_dummy_req = udc_alloc_bna_dummy(ep);
448 ep->bna_occurred = 0;
449 }
450
451 if (ep->num != UDC_EP0OUT_IX)
452 dev->data_ep_enabled = 1;
453 }
454
455 /* set ep values */
456 tmp = readl(&dev->csr->ne[udc_csr_epix]);
457 /* max packet */
fd05e720 458 tmp = AMD_ADDBITS(tmp, maxpacket, UDC_CSR_NE_MAX_PKT);
55d402d8
TD
459 /* ep number */
460 tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM);
461 /* ep direction */
462 tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR);
463 /* ep type */
464 tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE);
465 /* ep config */
466 tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG);
467 /* ep interface */
468 tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF);
469 /* ep alt */
470 tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT);
471 /* write reg */
472 writel(tmp, &dev->csr->ne[udc_csr_epix]);
473
474 /* enable ep irq */
475 tmp = readl(&dev->regs->ep_irqmsk);
476 tmp &= AMD_UNMASK_BIT(ep->num);
477 writel(tmp, &dev->regs->ep_irqmsk);
478
479 /*
480 * clear NAK by writing CNAK
481 * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written
482 */
483 if (!use_dma || ep->in) {
484 tmp = readl(&ep->regs->ctl);
485 tmp |= AMD_BIT(UDC_EPCTL_CNAK);
486 writel(tmp, &ep->regs->ctl);
487 ep->naking = 0;
488 UDC_QUEUE_CNAK(ep, ep->num);
489 }
490 tmp = desc->bEndpointAddress;
491 DBG(dev, "%s enabled\n", usbep->name);
492
493 spin_unlock_irqrestore(&dev->lock, iflags);
494 return 0;
495}
496
/*
 * Resets endpoint: restores software state (no descriptor, empty queue,
 * default ops), sets NAK, masks the ep irq, clears stale IN-DMA state,
 * and zeroes the DMA descriptor pointer. Caller holds the device lock.
 */
static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
{
	u32 tmp;

	VDBG(ep->dev, "ep-%d reset\n", ep->num);
	/* forget the gadget-supplied descriptor */
	ep->ep.desc = NULL;
	ep->ep.ops = &udc_ep_ops;
	INIT_LIST_HEAD(&ep->queue);

	/* no limit until a descriptor is supplied again */
	usb_ep_set_maxpacket_limit(&ep->ep,(u16) ~0);
	/* set NAK */
	tmp = readl(&ep->regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_SNAK);
	writel(tmp, &ep->regs->ctl);
	ep->naking = 1;

	/* disable interrupt */
	tmp = readl(&regs->ep_irqmsk);
	tmp |= AMD_BIT(ep->num);
	writel(tmp, &regs->ep_irqmsk);

	if (ep->in) {
		/* unset P and IN bit of potential former DMA */
		tmp = readl(&ep->regs->ctl);
		tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P);
		writel(tmp, &ep->regs->ctl);

		/* write-1-to-clear a possibly pending IN status */
		tmp = readl(&ep->regs->sts);
		tmp |= AMD_BIT(UDC_EPSTS_IN);
		writel(tmp, &ep->regs->sts);

		/* flush the fifo */
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_F);
		writel(tmp, &ep->regs->ctl);

	}
	/* reset desc pointer */
	writel(0, &ep->regs->desptr);
}
538
539/* Disables endpoint, is called by gadget driver */
540static int udc_ep_disable(struct usb_ep *usbep)
541{
542 struct udc_ep *ep = NULL;
543 unsigned long iflags;
544
545 if (!usbep)
546 return -EINVAL;
547
548 ep = container_of(usbep, struct udc_ep, ep);
ef20a72b 549 if (usbep->name == ep0_string || !ep->ep.desc)
55d402d8
TD
550 return -EINVAL;
551
552 DBG(ep->dev, "Disable ep-%d\n", ep->num);
553
554 spin_lock_irqsave(&ep->dev->lock, iflags);
555 udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
556 empty_req_queue(ep);
557 ep_init(ep->dev->regs, ep);
558 spin_unlock_irqrestore(&ep->dev->lock, iflags);
559
560 return 0;
561}
562
563/* Allocates request packet, called by gadget driver */
564static struct usb_request *
565udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
566{
567 struct udc_request *req;
568 struct udc_data_dma *dma_desc;
569 struct udc_ep *ep;
570
571 if (!usbep)
572 return NULL;
573
574 ep = container_of(usbep, struct udc_ep, ep);
575
576 VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
577 req = kzalloc(sizeof(struct udc_request), gfp);
578 if (!req)
579 return NULL;
580
581 req->req.dma = DMA_DONT_USE;
582 INIT_LIST_HEAD(&req->queue);
583
584 if (ep->dma) {
585 /* ep0 in requests are allocated from data pool here */
b5a6a4e5 586 dma_desc = dma_pool_alloc(ep->dev->data_requests, gfp,
55d402d8
TD
587 &req->td_phys);
588 if (!dma_desc) {
589 kfree(req);
590 return NULL;
591 }
592
593 VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
594 "td_phys = %lx\n",
595 req, dma_desc,
596 (unsigned long)req->td_phys);
597 /* prevent from using desc. - set HOST BUSY */
598 dma_desc->status = AMD_ADDBITS(dma_desc->status,
599 UDC_DMA_STP_STS_BS_HOST_BUSY,
600 UDC_DMA_STP_STS_BS);
551509d2 601 dma_desc->bufptr = cpu_to_le32(DMA_DONT_USE);
55d402d8
TD
602 req->td_data = dma_desc;
603 req->td_data_last = NULL;
604 req->chain_len = 1;
605 }
606
607 return &req->req;
608}
609
3719b9bd
SM
610/* frees pci pool descriptors of a DMA chain */
611static int udc_free_dma_chain(struct udc *dev, struct udc_request *req)
612{
613 int ret_val = 0;
614 struct udc_data_dma *td;
615 struct udc_data_dma *td_last = NULL;
616 unsigned int i;
617
618 DBG(dev, "free chain req = %p\n", req);
619
620 /* do not free first desc., will be done by free for request */
621 td_last = req->td_data;
622 td = phys_to_virt(td_last->next);
623
624 for (i = 1; i < req->chain_len; i++) {
b5a6a4e5 625 dma_pool_free(dev->data_requests, td,
3719b9bd
SM
626 (dma_addr_t)td_last->next);
627 td_last = td;
628 td = phys_to_virt(td_last->next);
629 }
630
631 return ret_val;
632}
633
55d402d8
TD
634/* Frees request packet, called by gadget driver */
635static void
636udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
637{
638 struct udc_ep *ep;
639 struct udc_request *req;
640
641 if (!usbep || !usbreq)
642 return;
643
644 ep = container_of(usbep, struct udc_ep, ep);
645 req = container_of(usbreq, struct udc_request, req);
646 VDBG(ep->dev, "free_req req=%p\n", req);
647 BUG_ON(!list_empty(&req->queue));
648 if (req->td_data) {
649 VDBG(ep->dev, "req->td_data=%p\n", req->td_data);
650
651 /* free dma chain if created */
170b778f 652 if (req->chain_len > 1)
55d402d8 653 udc_free_dma_chain(ep->dev, req);
55d402d8 654
b5a6a4e5 655 dma_pool_free(ep->dev->data_requests, req->td_data,
55d402d8
TD
656 req->td_phys);
657 }
658 kfree(req);
659}
660
/*
 * Init BNA dummy descriptor for HOST BUSY and pointing to itself.
 * The dummy is substituted for a real descriptor when a BNA (buffer
 * not available) condition must be parked harmlessly.
 */
static void udc_init_bna_dummy(struct udc_request *req)
{
	if (req) {
		/* set last bit */
		req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* set next pointer to itself */
		req->td_data->next = req->td_phys;
		/*
		 * NOTE(review): the original comment said "set HOST BUSY"
		 * but the value written is UDC_DMA_STP_STS_BS_DMA_DONE —
		 * confirm intended buffer status against the databook
		 */
		req->td_data->status
			= AMD_ADDBITS(req->td_data->status,
					UDC_DMA_STP_STS_BS_DMA_DONE,
					UDC_DMA_STP_STS_BS);
#ifdef UDC_VERBOSE
		pr_debug("bna desc = %p, sts = %08x\n",
			req->td_data, req->td_data->status);
#endif
	}
}
680
681/* Allocate BNA dummy descriptor */
682static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
683{
684 struct udc_request *req = NULL;
685 struct usb_request *_req = NULL;
686
687 /* alloc the dummy request */
688 _req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
689 if (_req) {
690 req = container_of(_req, struct udc_request, req);
691 ep->bna_dummy_req = req;
692 udc_init_bna_dummy(req);
693 }
694 return req;
695}
696
697/* Write data to TX fifo for IN packets */
698static void
699udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
700{
701 u8 *req_buf;
702 u32 *buf;
703 int i, j;
704 unsigned bytes = 0;
705 unsigned remaining = 0;
706
707 if (!req || !ep)
708 return;
709
710 req_buf = req->buf + req->actual;
711 prefetch(req_buf);
712 remaining = req->length - req->actual;
713
714 buf = (u32 *) req_buf;
715
716 bytes = ep->ep.maxpacket;
717 if (bytes > remaining)
718 bytes = remaining;
719
720 /* dwords first */
170b778f 721 for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
55d402d8 722 writel(*(buf + i), ep->txfifo);
55d402d8
TD
723
724 /* remaining bytes must be written by byte access */
725 for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
726 writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
727 ep->txfifo);
728 }
729
730 /* dummy write confirm */
731 writel(0, &ep->regs->confirm);
732}
733
734/* Read dwords from RX fifo for OUT transfers */
735static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
736{
737 int i;
738
739 VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);
740
170b778f 741 for (i = 0; i < dwords; i++)
55d402d8 742 *(buf + i) = readl(dev->rxfifo);
55d402d8
TD
743 return 0;
744}
745
746/* Read bytes from RX fifo for OUT transfers */
747static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
748{
749 int i, j;
750 u32 tmp;
751
752 VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);
753
754 /* dwords first */
170b778f 755 for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
55d402d8 756 *((u32 *)(buf + (i<<2))) = readl(dev->rxfifo);
55d402d8
TD
757
758 /* remaining bytes must be read by byte access */
759 if (bytes % UDC_DWORD_BYTES) {
760 tmp = readl(dev->rxfifo);
761 for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
762 *(buf + (i<<2) + j) = (u8)(tmp & UDC_BYTE_MASK);
763 tmp = tmp >> UDC_BITS_PER_BYTE;
764 }
765 }
766
767 return 0;
768}
769
770/* Read data from RX fifo for OUT transfers */
771static int
772udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
773{
774 u8 *buf;
775 unsigned buf_space;
776 unsigned bytes = 0;
777 unsigned finished = 0;
778
779 /* received number bytes */
780 bytes = readl(&ep->regs->sts);
781 bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE);
782
783 buf_space = req->req.length - req->req.actual;
784 buf = req->req.buf + req->req.actual;
785 if (bytes > buf_space) {
786 if ((buf_space % ep->ep.maxpacket) != 0) {
787 DBG(ep->dev,
788 "%s: rx %d bytes, rx-buf space = %d bytesn\n",
789 ep->ep.name, bytes, buf_space);
790 req->req.status = -EOVERFLOW;
791 }
792 bytes = buf_space;
793 }
794 req->req.actual += bytes;
795
796 /* last packet ? */
797 if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
798 || ((req->req.actual == req->req.length) && !req->req.zero))
799 finished = 1;
800
801 /* read rx fifo bytes */
802 VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
803 udc_rxfifo_read_bytes(ep->dev, buf, bytes);
804
805 return finished;
806}
807
c9760ad8
SM
/*
 * Creates or re-inits a DMA chain: one descriptor per buf_len chunk of
 * the request. An existing chain of sufficient length is reused (only
 * statuses are rewritten); a shorter one is freed and reallocated.
 * For IN eps the per-descriptor TXBYTES fields are filled in.
 * Returns 0 or -ENOMEM.
 */
static int udc_create_dma_chain(
	struct udc_ep *ep,
	struct udc_request *req,
	unsigned long buf_len, gfp_t gfp_flags
)
{
	unsigned long bytes = req->req.length;
	unsigned int i;
	dma_addr_t dma_addr;
	struct udc_data_dma	*td = NULL;
	struct udc_data_dma	*last = NULL;
	unsigned long txbytes;
	unsigned create_new_chain = 0;
	unsigned len;

	VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
			bytes, buf_len);
	dma_addr = DMA_DONT_USE;

	/* unset L bit in first desc for OUT */
	if (!ep->in)
		req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);

	/* alloc only new desc's if not already available */
	len = req->req.length / ep->ep.maxpacket;
	if (req->req.length % ep->ep.maxpacket)
		len++;

	if (len > req->chain_len) {
		/* shorter chain already allocated before */
		if (req->chain_len > 1)
			udc_free_dma_chain(ep->dev, req);
		req->chain_len = len;
		create_new_chain = 1;
	}

	td = req->td_data;
	/* gen. required number of descriptors and buffers */
	for (i = buf_len; i < bytes; i += buf_len) {
		/* create or determine next desc. */
		if (create_new_chain) {
			td = dma_pool_alloc(ep->dev->data_requests,
					gfp_flags, &dma_addr);
			if (!td)
				return -ENOMEM;

			td->status = 0;
		} else if (i == buf_len) {
			/* first td (reusing an old chain) */
			td = (struct udc_data_dma *)phys_to_virt(
						req->td_data->next);
			td->status = 0;
		} else {
			/* walk the reused chain via the prior desc */
			td = (struct udc_data_dma *)phys_to_virt(last->next);
			td->status = 0;
		}

		if (td)
			td->bufptr = req->req.dma + i; /* assign buffer */
		else
			break;

		/* short packet ? */
		if ((bytes - i) >= buf_len) {
			txbytes = buf_len;
		} else {
			/* short packet */
			txbytes = bytes - i;
		}

		/* link td and assign tx bytes */
		if (i == buf_len) {
			if (create_new_chain)
				req->td_data->next = dma_addr;
			/*
			 * else
			 *	req->td_data->next = virt_to_phys(td);
			 */
			/* write tx bytes */
			if (ep->in) {
				/* first desc */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
							ep->ep.maxpacket,
							UDC_DMA_IN_STS_TXBYTES);
				/* second desc */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		} else {
			if (create_new_chain)
				last->next = dma_addr;
			/*
			 * else
			 *	last->next = virt_to_phys(td);
			 */
			if (ep->in) {
				/* write tx bytes */
				td->status = AMD_ADDBITS(td->status,
							txbytes,
							UDC_DMA_IN_STS_TXBYTES);
			}
		}
		last = td;
	}
	/* set last bit */
	if (td) {
		td->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* last desc. points to itself */
		req->td_data_last = td;
	}

	return 0;
}
924
55d402d8
TD
/*
 * create/re-init a DMA descriptor or a DMA descriptor chain.
 * Sets the buffer pointer and L bit of the first descriptor, builds a
 * per-packet chain when use_dma_ppb is set, fills TXBYTES for IN eps,
 * and leaves the descriptor in HOST BUSY (IN) or HOST READY (OUT).
 * NOTE(review): assumes req->req.dma is already mapped — confirm with
 * the caller (udc_queue maps before calling).
 * Returns 0 or a negative error from udc_create_dma_chain.
 */
static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
{
	int retval = 0;
	u32 tmp;

	VDBG(ep->dev, "prep_dma\n");
	VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
			ep->num, req->td_data);

	/* set buffer pointer */
	req->td_data->bufptr = req->req.dma;

	/* set last bit */
	req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);

	/* build/re-init dma chain if maxpkt scatter mode, not for EP0 */
	if (use_dma_ppb) {

		retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
		if (retval != 0) {
			if (retval == -ENOMEM)
				DBG(ep->dev, "Out of DMA memory\n");
			return retval;
		}
		if (ep->in) {
			if (req->req.length == ep->ep.maxpacket) {
				/* exactly one full packet: write tx bytes */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
							ep->ep.maxpacket,
							UDC_DMA_IN_STS_TXBYTES);

			}
		}

	}

	if (ep->in) {
		VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d "
				"maxpacket=%d ep%d\n",
				use_dma_ppb, req->req.length,
				ep->ep.maxpacket, ep->num);
		/*
		 * if bytes < max packet then tx bytes must
		 * be written in packet per buffer mode
		 */
		if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
				|| ep->num == UDC_EP0OUT_IX
				|| ep->num == UDC_EP0IN_IX) {
			/* write tx bytes */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						req->req.length,
						UDC_DMA_IN_STS_TXBYTES);
			/* reset frame num */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						0,
						UDC_DMA_IN_STS_FRAMENUM);
		}
		/* set HOST BUSY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_BUSY,
				UDC_DMA_STP_STS_BS);
	} else {
		VDBG(ep->dev, "OUT set host ready\n");
		/* set HOST READY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_READY,
				UDC_DMA_STP_STS_BS);

		/* clear NAK by writing CNAK */
		if (ep->naking) {
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->naking = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}

	}

	return retval;
}
1013
/*
 * Completes request packet ... caller MUST hold lock.
 * Unmaps DMA, finalizes the status, removes the request from the ep
 * queue, and gives it back to the gadget driver with the device lock
 * dropped (the completion callback may re-queue).
 */
static void
complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
__releases(ep->dev->lock)
__acquires(ep->dev->lock)
{
	struct udc *dev;
	unsigned halted;

	VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);

	dev = ep->dev;
	/* unmap DMA */
	if (ep->dma)
		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->in);

	/* halt the ep while the callback runs; restore afterwards */
	halted = ep->halted;
	ep->halted = 1;

	/* set new status if pending */
	if (req->req.status == -EINPROGRESS)
		req->req.status = sts;

	/* remove from ep queue */
	list_del_init(&req->queue);

	VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
		&req->req, req->req.length, ep->ep.name, sts);

	/* drop the lock around the gadget callback */
	spin_unlock(&dev->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->halted = halted;
}
1048
55d402d8
TD
1049/* Iterates to the end of a DMA chain and returns last descriptor */
1050static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
1051{
1052 struct udc_data_dma *td;
1053
1054 td = req->td_data;
170b778f 1055 while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L)))
55d402d8 1056 td = phys_to_virt(td->next);
55d402d8
TD
1057
1058 return td;
1059
1060}
1061
1062/* Iterates to the end of a DMA chain and counts bytes received */
1063static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
1064{
1065 struct udc_data_dma *td;
1066 u32 count;
1067
1068 td = req->td_data;
1069 /* received number bytes */
1070 count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);
1071
1072 while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
1073 td = phys_to_virt(td->next);
1074 /* received number bytes */
1075 if (td) {
1076 count += AMD_GETBITS(td->status,
1077 UDC_DMA_OUT_STS_RXBYTES);
1078 }
1079 }
1080
1081 return count;
1082
1083}
1084
55d402d8
TD
1085/* Enabling RX DMA */
1086static void udc_set_rde(struct udc *dev)
1087{
1088 u32 tmp;
1089
1090 VDBG(dev, "udc_set_rde()\n");
1091 /* stop RDE timer */
1092 if (timer_pending(&udc_timer)) {
1093 set_rde = 0;
1094 mod_timer(&udc_timer, jiffies - 1);
1095 }
1096 /* set RDE */
1097 tmp = readl(&dev->regs->ctl);
1098 tmp |= AMD_BIT(UDC_DEVCTL_RDE);
1099 writel(tmp, &dev->regs->ctl);
1100}
1101
1102/* Queues a request packet, called by gadget driver */
1103static int
1104udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
1105{
1106 int retval = 0;
1107 u8 open_rxfifo = 0;
1108 unsigned long iflags;
1109 struct udc_ep *ep;
1110 struct udc_request *req;
1111 struct udc *dev;
1112 u32 tmp;
1113
1114 /* check the inputs */
1115 req = container_of(usbreq, struct udc_request, req);
1116
1117 if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
1118 || !list_empty(&req->queue))
1119 return -EINVAL;
1120
1121 ep = container_of(usbep, struct udc_ep, ep);
ef20a72b 1122 if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
55d402d8
TD
1123 return -EINVAL;
1124
1125 VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
1126 dev = ep->dev;
1127
1128 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
1129 return -ESHUTDOWN;
1130
1131 /* map dma (usually done before) */
220e8600 1132 if (ep->dma) {
55d402d8 1133 VDBG(dev, "DMA map req %p\n", req);
220e8600
FB
1134 retval = usb_gadget_map_request(&udc->gadget, usbreq, ep->in);
1135 if (retval)
1136 return retval;
55d402d8
TD
1137 }
1138
1139 VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
1140 usbep->name, usbreq, usbreq->length,
1141 req->td_data, usbreq->buf);
1142
1143 spin_lock_irqsave(&dev->lock, iflags);
1144 usbreq->actual = 0;
1145 usbreq->status = -EINPROGRESS;
1146 req->dma_done = 0;
1147
1148 /* on empty queue just do first transfer */
1149 if (list_empty(&ep->queue)) {
1150 /* zlp */
1151 if (usbreq->length == 0) {
1152 /* IN zlp's are handled by hardware */
1153 complete_req(ep, req, 0);
1154 VDBG(dev, "%s: zlp\n", ep->ep.name);
1155 /*
1156 * if set_config or set_intf is waiting for ack by zlp
1157 * then set CSR_DONE
1158 */
1159 if (dev->set_cfg_not_acked) {
1160 tmp = readl(&dev->regs->ctl);
1161 tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
1162 writel(tmp, &dev->regs->ctl);
1163 dev->set_cfg_not_acked = 0;
1164 }
1165 /* setup command is ACK'ed now by zlp */
1166 if (dev->waiting_zlp_ack_ep0in) {
1167 /* clear NAK by writing CNAK in EP0_IN */
1168 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
1169 tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1170 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
1171 dev->ep[UDC_EP0IN_IX].naking = 0;
1172 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
1173 UDC_EP0IN_IX);
1174 dev->waiting_zlp_ack_ep0in = 0;
1175 }
1176 goto finished;
1177 }
1178 if (ep->dma) {
ffcba5a5 1179 retval = prep_dma(ep, req, GFP_ATOMIC);
55d402d8
TD
1180 if (retval != 0)
1181 goto finished;
1182 /* write desc pointer to enable DMA */
1183 if (ep->in) {
1184 /* set HOST READY */
1185 req->td_data->status =
1186 AMD_ADDBITS(req->td_data->status,
1187 UDC_DMA_IN_STS_BS_HOST_READY,
1188 UDC_DMA_IN_STS_BS);
1189 }
1190
1191 /* disabled rx dma while descriptor update */
1192 if (!ep->in) {
1193 /* stop RDE timer */
1194 if (timer_pending(&udc_timer)) {
1195 set_rde = 0;
1196 mod_timer(&udc_timer, jiffies - 1);
1197 }
1198 /* clear RDE */
1199 tmp = readl(&dev->regs->ctl);
1200 tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
1201 writel(tmp, &dev->regs->ctl);
1202 open_rxfifo = 1;
1203
1204 /*
1205 * if BNA occurred then let BNA dummy desc.
1206 * point to current desc.
1207 */
1208 if (ep->bna_occurred) {
1209 VDBG(dev, "copy to BNA dummy desc.\n");
1210 memcpy(ep->bna_dummy_req->td_data,
1211 req->td_data,
1212 sizeof(struct udc_data_dma));
1213 }
1214 }
1215 /* write desc pointer */
1216 writel(req->td_phys, &ep->regs->desptr);
1217
1218 /* clear NAK by writing CNAK */
1219 if (ep->naking) {
1220 tmp = readl(&ep->regs->ctl);
1221 tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1222 writel(tmp, &ep->regs->ctl);
1223 ep->naking = 0;
1224 UDC_QUEUE_CNAK(ep, ep->num);
1225 }
1226
1227 if (ep->in) {
1228 /* enable ep irq */
1229 tmp = readl(&dev->regs->ep_irqmsk);
1230 tmp &= AMD_UNMASK_BIT(ep->num);
1231 writel(tmp, &dev->regs->ep_irqmsk);
1232 }
c5deb832
TD
1233 } else if (ep->in) {
1234 /* enable ep irq */
1235 tmp = readl(&dev->regs->ep_irqmsk);
1236 tmp &= AMD_UNMASK_BIT(ep->num);
1237 writel(tmp, &dev->regs->ep_irqmsk);
1238 }
55d402d8
TD
1239
1240 } else if (ep->dma) {
1241
1242 /*
1243 * prep_dma not used for OUT ep's, this is not possible
1244 * for PPB modes, because of chain creation reasons
1245 */
1246 if (ep->in) {
ffcba5a5 1247 retval = prep_dma(ep, req, GFP_ATOMIC);
55d402d8
TD
1248 if (retval != 0)
1249 goto finished;
1250 }
1251 }
1252 VDBG(dev, "list_add\n");
1253 /* add request to ep queue */
1254 if (req) {
1255
1256 list_add_tail(&req->queue, &ep->queue);
1257
1258 /* open rxfifo if out data queued */
1259 if (open_rxfifo) {
1260 /* enable DMA */
1261 req->dma_going = 1;
1262 udc_set_rde(dev);
1263 if (ep->num != UDC_EP0OUT_IX)
1264 dev->data_ep_queued = 1;
1265 }
1266 /* stop OUT naking */
1267 if (!ep->in) {
1268 if (!use_dma && udc_rxfifo_pending) {
fec8de3a 1269 DBG(dev, "udc_queue(): pending bytes in "
55d402d8
TD
1270 "rxfifo after nyet\n");
1271 /*
1272 * read pending bytes afer nyet:
1273 * referring to isr
1274 */
1275 if (udc_rxfifo_read(ep, req)) {
1276 /* finish */
1277 complete_req(ep, req, 0);
1278 }
1279 udc_rxfifo_pending = 0;
1280
1281 }
1282 }
1283 }
1284
1285finished:
1286 spin_unlock_irqrestore(&dev->lock, iflags);
1287 return retval;
1288}
1289
1290/* Empty request queue of an endpoint; caller holds spinlock */
1291static void empty_req_queue(struct udc_ep *ep)
1292{
1293 struct udc_request *req;
1294
1295 ep->halted = 1;
1296 while (!list_empty(&ep->queue)) {
1297 req = list_entry(ep->queue.next,
1298 struct udc_request,
1299 queue);
1300 complete_req(ep, req, -ESHUTDOWN);
1301 }
1302}
1303
1304/* Dequeues a request packet, called by gadget driver */
1305static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
1306{
1307 struct udc_ep *ep;
1308 struct udc_request *req;
1309 unsigned halted;
1310 unsigned long iflags;
1311
1312 ep = container_of(usbep, struct udc_ep, ep);
ef20a72b 1313 if (!usbep || !usbreq || (!ep->ep.desc && (ep->num != 0
55d402d8
TD
1314 && ep->num != UDC_EP0OUT_IX)))
1315 return -EINVAL;
1316
1317 req = container_of(usbreq, struct udc_request, req);
1318
1319 spin_lock_irqsave(&ep->dev->lock, iflags);
1320 halted = ep->halted;
1321 ep->halted = 1;
1322 /* request in processing or next one */
1323 if (ep->queue.next == &req->queue) {
1324 if (ep->dma && req->dma_going) {
1325 if (ep->in)
1326 ep->cancel_transfer = 1;
1327 else {
1328 u32 tmp;
1329 u32 dma_sts;
1330 /* stop potential receive DMA */
1331 tmp = readl(&udc->regs->ctl);
1332 writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
1333 &udc->regs->ctl);
1334 /*
1335 * Cancel transfer later in ISR
1336 * if descriptor was touched.
1337 */
1338 dma_sts = AMD_GETBITS(req->td_data->status,
1339 UDC_DMA_OUT_STS_BS);
1340 if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
1341 ep->cancel_transfer = 1;
1342 else {
1343 udc_init_bna_dummy(ep->req);
1344 writel(ep->bna_dummy_req->td_phys,
1345 &ep->regs->desptr);
1346 }
1347 writel(tmp, &udc->regs->ctl);
1348 }
1349 }
1350 }
1351 complete_req(ep, req, -ECONNRESET);
1352 ep->halted = halted;
1353
1354 spin_unlock_irqrestore(&ep->dev->lock, iflags);
1355 return 0;
1356}
1357
/*
 * Halt or clear halt of an endpoint (usb_ep_ops.set_halt).
 *
 * halt != 0: ep0 only records stall_ep0in; other eps get the STALL (S)
 * bit set and a poll timer is armed (udc_pollstall_timer) which re-checks
 * the stall state. halt == 0: S is cleared and the ep is un-NAKed.
 * Returns 0, -EINVAL on bad ep, -ESHUTDOWN when no driver is bound.
 */
static int
udc_set_halt(struct usb_ep *usbep, int halt)
{
	struct udc_ep *ep;
	u32 tmp;
	unsigned long iflags;
	int retval = 0;

	if (!usbep)
		return -EINVAL;

	pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc_stall_spinlock, iflags);
	/* halt or clear halt */
	if (halt) {
		if (ep->num == 0)
			ep->dev->stall_ep0in = 1;
		else {
			/*
			 * set STALL
			 * rxfifo empty not taken into account
			 */
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 1;

			/* setup poll timer to re-assert stall state */
			if (!timer_pending(&udc_pollstall_timer)) {
				udc_pollstall_timer.expires = jiffies +
					HZ * UDC_POLLSTALL_TIMER_USECONDS
					/ (1000 * 1000);
				if (!stop_pollstall_timer) {
					DBG(ep->dev, "start polltimer\n");
					add_timer(&udc_pollstall_timer);
				}
			}
		}
	} else {
		/* ep is halted by set_halt() before */
		if (ep->halted) {
			tmp = readl(&ep->regs->ctl);
			/* clear stall bit */
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			/* clear NAK by writing CNAK */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}
	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
	return retval;
}
1420
/* gadget interface: per-endpoint operations exposed to the gadget layer */
static const struct usb_ep_ops udc_ep_ops = {
	.enable = udc_ep_enable,
	.disable = udc_ep_disable,

	.alloc_request = udc_alloc_request,
	.free_request = udc_free_request,

	.queue = udc_queue,
	.dequeue = udc_dequeue,

	.set_halt = udc_set_halt,
	/* fifo ops (fifo_status/fifo_flush) not implemented */
};
1435
1436/*-------------------------------------------------------------------------*/
1437
1438/* Get frame counter (not implemented) */
1439static int udc_get_frame(struct usb_gadget *gadget)
1440{
1441 return -EOPNOTSUPP;
1442}
1443
79a5b4aa
SM
1444/* Initiates a remote wakeup */
1445static int udc_remote_wakeup(struct udc *dev)
1446{
1447 unsigned long flags;
1448 u32 tmp;
1449
1450 DBG(dev, "UDC initiates remote wakeup\n");
1451
1452 spin_lock_irqsave(&dev->lock, flags);
1453
1454 tmp = readl(&dev->regs->ctl);
1455 tmp |= AMD_BIT(UDC_DEVCTL_RES);
1456 writel(tmp, &dev->regs->ctl);
1457 tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
1458 writel(tmp, &dev->regs->ctl);
1459
1460 spin_unlock_irqrestore(&dev->lock, flags);
1461 return 0;
1462}
1463
55d402d8
TD
1464/* Remote wakeup gadget interface */
1465static int udc_wakeup(struct usb_gadget *gadget)
1466{
1467 struct udc *dev;
1468
1469 if (!gadget)
1470 return -EINVAL;
1471 dev = container_of(gadget, struct udc, gadget);
1472 udc_remote_wakeup(dev);
1473
1474 return 0;
1475}
1476
45005f69
FB
1477static int amd5536_udc_start(struct usb_gadget *g,
1478 struct usb_gadget_driver *driver);
22835b80
FB
1479static int amd5536_udc_stop(struct usb_gadget *g);
1480
55d402d8
TD
/* gadget interface: device-level operations exposed to the UDC core */
static const struct usb_gadget_ops udc_ops = {
	.wakeup = udc_wakeup,
	.get_frame = udc_get_frame,
	.udc_start = amd5536_udc_start,
	.udc_stop = amd5536_udc_stop,
};
1487
1488/* Setups endpoint parameters, adds endpoints to linked list */
1489static void make_ep_lists(struct udc *dev)
1490{
1491 /* make gadget ep lists */
1492 INIT_LIST_HEAD(&dev->gadget.ep_list);
1493 list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
1494 &dev->gadget.ep_list);
1495 list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
1496 &dev->gadget.ep_list);
1497 list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
1498 &dev->gadget.ep_list);
1499
1500 /* fifo config */
1501 dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
1502 if (dev->gadget.speed == USB_SPEED_FULL)
1503 dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
1504 else if (dev->gadget.speed == USB_SPEED_HIGH)
1505 dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
1506 dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
1507}
1508
55d402d8
TD
/*
 * Inits UDC context: stops driver timers, disables DMA, programs the
 * device configuration register, and rebuilds the gadget ep lists.
 */
static void udc_basic_init(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "udc_basic_init()\n");

	dev->gadget.speed = USB_SPEED_UNKNOWN;

	/* stop RDE timer: force it to expire immediately as a no-op */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* stop poll stall timer the same way */
	if (timer_pending(&udc_pollstall_timer))
		mod_timer(&udc_pollstall_timer, jiffies - 1);
	/* disable DMA (clear both receive and transmit DMA enable) */
	tmp = readl(&dev->regs->ctl);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE);
	writel(tmp, &dev->regs->ctl);

	/* enable dynamic CSR programming */
	tmp = readl(&dev->regs->cfg);
	tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG);
	/* set self powered */
	tmp |= AMD_BIT(UDC_DEVCFG_SP);
	/* set remote wakeupable */
	tmp |= AMD_BIT(UDC_DEVCFG_RWKP);
	writel(tmp, &dev->regs->cfg);

	make_ep_lists(dev);

	dev->data_ep_enabled = 0;
	dev->data_ep_queued = 0;
}
1546
5d31a17b
SM
1547/* init registers at driver load time */
1548static int startup_registers(struct udc *dev)
1549{
1550 u32 tmp;
1551
1552 /* init controller by soft reset */
1553 udc_soft_reset(dev);
1554
1555 /* mask not needed interrupts */
1556 udc_mask_unused_interrupts(dev);
1557
1558 /* put into initial config */
1559 udc_basic_init(dev);
1560 /* link up all endpoints */
1561 udc_setup_endpoints(dev);
1562
1563 /* program speed */
1564 tmp = readl(&dev->regs->cfg);
1565 if (use_fullspeed)
1566 tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
1567 else
1568 tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
1569 writel(tmp, &dev->regs->cfg);
1570
1571 return 0;
1572}
1573
55d402d8
TD
/*
 * Sets initial endpoint parameters: reads the enumerated speed, fills in
 * per-ep fields (name, caps, fifo depth, register base), optionally
 * resets/NAKs endpoints, and programs ep0 max packet sizes per speed.
 */
static void udc_setup_endpoints(struct udc *dev)
{
	struct udc_ep *ep;
	u32 tmp;
	u32 reg;

	DBG(dev, "udc_setup_endpoints()\n");

	/* read enum speed from device status register */
	tmp = readl(&dev->regs->sts);
	tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
	if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH)
		dev->gadget.speed = USB_SPEED_HIGH;
	else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL)
		dev->gadget.speed = USB_SPEED_FULL;

	/* set basic ep parameters */
	for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
		ep = &dev->ep[tmp];
		ep->dev = dev;
		ep->ep.name = ep_info[tmp].name;
		ep->ep.caps = ep_info[tmp].caps;
		ep->num = tmp;
		/* txfifo size is calculated at enable time */
		ep->txfifo = dev->txfifo;

		/* fifo size: first UDC_EPIN_NUM indices are IN endpoints */
		if (tmp < UDC_EPIN_NUM) {
			ep->fifo_depth = UDC_TXFIFO_SIZE;
			ep->in = 1;
		} else {
			ep->fifo_depth = UDC_RXFIFO_SIZE;
			ep->in = 0;
		}
		ep->regs = &dev->ep_regs[tmp];
		/*
		 * ep will be reset only if ep was not enabled before to avoid
		 * disabling ep interrupts when ENUM interrupt occurs but ep is
		 * not enabled by gadget driver
		 */
		if (!ep->ep.desc)
			ep_init(dev->regs, ep);

		if (use_dma) {
			/*
			 * ep->dma is not really used, just to indicate that
			 * DMA is active: remove this
			 * dma regs = dev control regs
			 */
			ep->dma = &dev->regs->ctl;

			/* nak OUT endpoints until enable - not for ep0 */
			if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX
					&& tmp > UDC_EPIN_NUM) {
				/* set NAK */
				reg = readl(&dev->ep[tmp].regs->ctl);
				reg |= AMD_BIT(UDC_EPCTL_SNAK);
				writel(reg, &dev->ep[tmp].regs->ctl);
				dev->ep[tmp].naking = 1;
			}
		}
	}
	/* EP0 max packet limits depend on the enumerated speed */
	if (dev->gadget.speed == USB_SPEED_FULL) {
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
					   UDC_FS_EP0IN_MAX_PKT_SIZE);
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
					   UDC_FS_EP0OUT_MAX_PKT_SIZE);
	} else if (dev->gadget.speed == USB_SPEED_HIGH) {
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
					   UDC_EP0IN_MAX_PKT_SIZE);
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
					   UDC_EP0OUT_MAX_PKT_SIZE);
	}

	/*
	 * with suspend bug workaround, ep0 params for gadget driver
	 * are set at gadget driver bind() call
	 */
	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
	dev->ep[UDC_EP0IN_IX].halted = 0;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);

	/* init cfg/alt/int */
	dev->cur_config = 0;
	dev->cur_intf = 0;
	dev->cur_alt = 0;
}
1665
1666/* Bringup after Connect event, initial bringup to be ready for ep0 events */
1667static void usb_connect(struct udc *dev)
1668{
1669
1670 dev_info(&dev->pdev->dev, "USB Connect\n");
1671
1672 dev->connected = 1;
1673
1674 /* put into initial config */
1675 udc_basic_init(dev);
1676
1677 /* enable device setup interrupts */
1678 udc_enable_dev_setup_interrupts(dev);
1679}
1680
/*
 * Calls gadget with disconnect event and resets the UDC and makes
 * initial bringup to be ready for ep0 events.
 * The actual work is deferred to udc_tasklet_disconnect().
 */
static void usb_disconnect(struct udc *dev)
{
	dev_info(&dev->pdev->dev, "USB Disconnect\n");

	dev->connected = 0;

	/* mask interrupts */
	udc_mask_unused_interrupts(dev);

	/* REVISIT there doesn't seem to be a point to having this
	 * talk to a tasklet ... do it directly, we already hold
	 * the spinlock needed to process the disconnect.
	 */

	tasklet_schedule(&disconnect_tasklet);
}
1702
/* Tasklet for disconnect to be outside of interrupt context */
static void udc_tasklet_disconnect(unsigned long par)
{
	/* par holds the address of a struct udc * (see tasklet setup) */
	struct udc *dev = (struct udc *)(*((struct udc **) par));
	u32 tmp;

	DBG(dev, "Tasklet disconnect\n");
	spin_lock_irq(&dev->lock);

	if (dev->driver) {
		/* the gadget's disconnect callback must run unlocked */
		spin_unlock(&dev->lock);
		dev->driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);

		/* empty queues of all endpoints */
		for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
			empty_req_queue(&dev->ep[tmp]);
	}

	/* disable ep0 */
	ep_init(dev->regs,
		&dev->ep[UDC_EP0IN_IX]);

	/* init controller by soft reset, only once per module life */
	if (!soft_reset_occured) {
		udc_soft_reset(dev);
		soft_reset_occured++;
	}

	/* re-enable dev interrupts */
	udc_enable_dev_setup_interrupts(dev);
	/* back to full speed ? */
	if (use_fullspeed) {
		tmp = readl(&dev->regs->cfg);
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
		writel(tmp, &dev->regs->cfg);
	}

	spin_unlock_irq(&dev->lock);
}
1745
1746/* Reset the UDC core */
1747static void udc_soft_reset(struct udc *dev)
1748{
1749 unsigned long flags;
1750
1751 DBG(dev, "Soft reset\n");
1752 /*
1753 * reset possible waiting interrupts, because int.
1754 * status is lost after soft reset,
1755 * ep int. status reset
1756 */
1757 writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts);
1758 /* device int. status reset */
1759 writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts);
1760
1761 spin_lock_irqsave(&udc_irq_spinlock, flags);
1762 writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
1763 readl(&dev->regs->cfg);
1764 spin_unlock_irqrestore(&udc_irq_spinlock, flags);
1765
1766}
1767
/*
 * RDE timer callback to set the RDE (receive DMA enable) bit.
 *
 * Driven by the module-scope 'set_rde' state (as used below):
 *   > 1 : fifo had data on the previous tick — open it now (set RDE)
 *     1 : polling armed — check fifo state on expiry
 *     0 : timer was cancelled (RDE handled by udc_queue())
 *    -1 : idle / RDE already set
 */
static void udc_timer_function(unsigned long v)
{
	u32 tmp;

	spin_lock_irq(&udc_irq_spinlock);

	if (set_rde > 0) {
		/*
		 * open the fifo if fifo was filled on last timer call
		 * conditionally
		 */
		if (set_rde > 1) {
			/* set RDE to receive setup data */
			tmp = readl(&udc->regs->ctl);
			tmp |= AMD_BIT(UDC_DEVCTL_RDE);
			writel(tmp, &udc->regs->ctl);
			set_rde = -1;
		} else if (readl(&udc->regs->sts)
				& AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
			/*
			 * if fifo empty setup polling, do not just
			 * open the fifo
			 */
			udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV;
			if (!stop_timer)
				add_timer(&udc_timer);
		} else {
			/*
			 * fifo contains data now, setup timer for opening
			 * the fifo when timer expires to be able to receive
			 * setup packets, when data packets gets queued by
			 * gadget layer then timer will forced to expire with
			 * set_rde=0 (RDE is set in udc_queue())
			 */
			set_rde++;
			/* debug: lhadmot_timer_start = 221070 */
			udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS;
			if (!stop_timer)
				add_timer(&udc_timer);
		}

	} else
		set_rde = -1; /* RDE was set by udc_queue() */
	spin_unlock_irq(&udc_irq_spinlock);
	/* signal module unload path that the timer has finished */
	if (stop_timer)
		complete(&on_exit);
}
1817
/*
 * Handle halt state, used in stall poll timer: if the hardware has
 * dropped the STALL (S) bit while the ep is still marked halted,
 * clear the halt (see FIXME below for why STALL is not re-asserted).
 */
static void udc_handle_halt_state(struct udc_ep *ep)
{
	u32 tmp;
	/* set stall as long not halted */
	if (ep->halted == 1) {
		tmp = readl(&ep->regs->ctl);
		/* STALL cleared ? */
		if (!(tmp & AMD_BIT(UDC_EPCTL_S))) {
			/*
			 * FIXME: MSC spec requires that stall remains
			 * even on receivng of CLEAR_FEATURE HALT. So
			 * we would set STALL again here to be compliant.
			 * But with current mass storage drivers this does
			 * not work (would produce endless host retries).
			 * So we clear halt on CLEAR_FEATURE.
			 *
			DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);*/

			/* clear NAK by writing CNAK */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}
}
1847
1848/* Stall timer callback to poll S bit and set it again after */
1849static void udc_pollstall_timer_function(unsigned long v)
1850{
1851 struct udc_ep *ep;
1852 int halted = 0;
1853
1854 spin_lock_irq(&udc_stall_spinlock);
1855 /*
1856 * only one IN and OUT endpoints are handled
1857 * IN poll stall
1858 */
1859 ep = &udc->ep[UDC_EPIN_IX];
1860 udc_handle_halt_state(ep);
1861 if (ep->halted)
1862 halted = 1;
1863 /* OUT poll stall */
1864 ep = &udc->ep[UDC_EPOUT_IX];
1865 udc_handle_halt_state(ep);
1866 if (ep->halted)
1867 halted = 1;
1868
1869 /* setup timer again when still halted */
1870 if (!stop_pollstall_timer && halted) {
1871 udc_pollstall_timer.expires = jiffies +
1872 HZ * UDC_POLLSTALL_TIMER_USECONDS
1873 / (1000 * 1000);
1874 add_timer(&udc_pollstall_timer);
1875 }
1876 spin_unlock_irq(&udc_stall_spinlock);
1877
1878 if (stop_pollstall_timer)
1879 complete(&on_pollstall_exit);
1880}
1881
/*
 * Inits endpoint 0 so that SETUP packets are processed: flushes the IN
 * fifo, programs buffer/packet sizes per enumerated speed, arms the DMA
 * descriptors (if use_dma), and clears NAK on both ep0 directions.
 */
static void activate_control_endpoints(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "activate_control_endpoints\n");

	/* flush fifo */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_F);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);

	/* set ep0 directions */
	dev->ep[UDC_EP0IN_IX].in = 1;
	dev->ep[UDC_EP0OUT_IX].in = 0;

	/* set buffer size (tx fifo entries) of EP0_IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE,
					UDC_EPIN_BUFF_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE,
					UDC_EPIN_BUFF_SIZE);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);

	/* set max packet size of EP0_IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);

	/* set max packet size of EP0_OUT */
	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
					UDC_EP_MAX_PKT_SIZE);
	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);

	/* set max packet size of EP0 in UDC CSR */
	tmp = readl(&dev->csr->ne[0]);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
					UDC_CSR_NE_MAX_PKT);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
					UDC_CSR_NE_MAX_PKT);
	writel(tmp, &dev->csr->ne[0]);

	if (use_dma) {
		/* mark the ep0-out descriptor as the last in its chain */
		dev->ep[UDC_EP0OUT_IX].td->status |=
			AMD_BIT(UDC_DMA_OUT_STS_L);
		/* write dma desc address (setup and data descriptors) */
		writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
			&dev->ep[UDC_EP0OUT_IX].regs->subptr);
		writel(dev->ep[UDC_EP0OUT_IX].td_phys,
			&dev->ep[UDC_EP0OUT_IX].regs->desptr);
		/* stop RDE timer (expire immediately as a no-op) */
		if (timer_pending(&udc_timer)) {
			set_rde = 0;
			mod_timer(&udc_timer, jiffies - 1);
		}
		/* stop pollstall timer */
		if (timer_pending(&udc_pollstall_timer))
			mod_timer(&udc_pollstall_timer, jiffies - 1);
		/* enable DMA */
		tmp = readl(&dev->regs->ctl);
		tmp |= AMD_BIT(UDC_DEVCTL_MODE)
				| AMD_BIT(UDC_DEVCTL_RDE)
				| AMD_BIT(UDC_DEVCTL_TDE);
		if (use_dma_bufferfill_mode)
			tmp |= AMD_BIT(UDC_DEVCTL_BF);
		else if (use_dma_ppb_du)
			tmp |= AMD_BIT(UDC_DEVCTL_DU);
		writel(tmp, &dev->regs->ctl);
	}

	/* clear NAK by writing CNAK for EP0IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
	dev->ep[UDC_EP0IN_IX].naking = 0;
	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);

	/* clear NAK by writing CNAK for EP0OUT */
	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
	dev->ep[UDC_EP0OUT_IX].naking = 0;
	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
}
1980
/* Make endpoint 0 ready for control traffic; always returns 0 */
static int setup_ep0(struct udc *dev)
{
	/* bring up ep0, then unmask its and the device SETUP interrupts */
	activate_control_endpoints(dev);
	udc_enable_ep0_interrupts(dev);
	udc_enable_dev_setup_interrupts(dev);
	return 0;
}
1992
/*
 * Called by the UDC core when a gadget driver registers itself
 * (usb_gadget_ops.udc_start): stores the driver, readies ep0, clears
 * soft disconnect, and signals connect. Always returns 0.
 */
static int amd5536_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct udc *dev = to_amd5536_udc(g);
	u32 tmp;

	driver->driver.bus = NULL;
	dev->driver = driver;

	/* Some gadget drivers use both ep0 directions.
	 * NOTE: to gadget driver, ep0 is just one endpoint...
	 */
	dev->ep[UDC_EP0OUT_IX].ep.driver_data =
		dev->ep[UDC_EP0IN_IX].ep.driver_data;

	/* get ready for ep0 traffic */
	setup_ep0(dev);

	/* clear SD (soft disconnect) so the host can enumerate us */
	tmp = readl(&dev->regs->ctl);
	tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD);
	writel(tmp, &dev->regs->ctl);

	usb_connect(dev);

	return 0;
}
55d402d8
TD
2021
/*
 * Shutdown requests and disconnect from gadget; caller holds dev->lock.
 * The 'driver' parameter is currently unused (callers pass NULL).
 * The __releases/__acquires annotations reflect that empty_req_queue()
 * -> complete_req() temporarily drops the lock.
 */
static void
shutdown(struct udc *dev, struct usb_gadget_driver *driver)
__releases(dev->lock)
__acquires(dev->lock)
{
	int tmp;

	/* empty queues and init hardware */
	udc_basic_init(dev);

	for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
		empty_req_queue(&dev->ep[tmp]);

	udc_setup_endpoints(dev);
}
2038
2039/* Called by gadget driver to unregister itself */
22835b80 2040static int amd5536_udc_stop(struct usb_gadget *g)
55d402d8 2041{
45005f69
FB
2042 struct udc *dev = to_amd5536_udc(g);
2043 unsigned long flags;
55d402d8
TD
2044 u32 tmp;
2045
55d402d8
TD
2046 spin_lock_irqsave(&dev->lock, flags);
2047 udc_mask_unused_interrupts(dev);
21090f06 2048 shutdown(dev, NULL);
55d402d8
TD
2049 spin_unlock_irqrestore(&dev->lock, flags);
2050
55d402d8
TD
2051 dev->driver = NULL;
2052
2053 /* set SD */
2054 tmp = readl(&dev->regs->ctl);
2055 tmp |= AMD_BIT(UDC_DEVCTL_SD);
2056 writel(tmp, &dev->regs->ctl);
2057
55d402d8
TD
2058 return 0;
2059}
55d402d8
TD
2060
2061/* Clear pending NAK bits */
2062static void udc_process_cnak_queue(struct udc *dev)
2063{
2064 u32 tmp;
2065 u32 reg;
2066
2067 /* check epin's */
2068 DBG(dev, "CNAK pending queue processing\n");
2069 for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) {
2070 if (cnak_pending & (1 << tmp)) {
2071 DBG(dev, "CNAK pending for ep%d\n", tmp);
2072 /* clear NAK by writing CNAK */
2073 reg = readl(&dev->ep[tmp].regs->ctl);
2074 reg |= AMD_BIT(UDC_EPCTL_CNAK);
2075 writel(reg, &dev->ep[tmp].regs->ctl);
2076 dev->ep[tmp].naking = 0;
2077 UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
2078 }
2079 }
2080 /* ... and ep0out */
2081 if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
2082 DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
2083 /* clear NAK by writing CNAK */
2084 reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
2085 reg |= AMD_BIT(UDC_EPCTL_CNAK);
2086 writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
2087 dev->ep[UDC_EP0OUT_IX].naking = 0;
2088 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
2089 dev->ep[UDC_EP0OUT_IX].num);
2090 }
2091}
2092
/*
 * Enabling RX DMA after a setup packet: sets RDE immediately when it is
 * safe, otherwise defers it via the RDE timer.
 */
static void udc_ep0_set_rde(struct udc *dev)
{
	if (use_dma) {
		/*
		 * only enable RXDMA when no data endpoint enabled
		 * or data is queued
		 */
		if (!dev->data_ep_enabled || dev->data_ep_queued) {
			udc_set_rde(dev);
		} else {
			/*
			 * setup timer for enabling RDE (to not enable
			 * RXFIFO DMA for data endpoints too early)
			 */
			if (set_rde != 0 && !timer_pending(&udc_timer)) {
				udc_timer.expires =
					jiffies + HZ/UDC_RDE_TIMER_DIV;
				set_rde = 1;
				if (!stop_timer)
					add_timer(&udc_timer);
			}
		}
	}
}
2118
2119
2120/* Interrupt handler for data OUT traffic */
2121static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
2122{
2123 irqreturn_t ret_val = IRQ_NONE;
2124 u32 tmp;
2125 struct udc_ep *ep;
2126 struct udc_request *req;
2127 unsigned int count;
2128 struct udc_data_dma *td = NULL;
2129 unsigned dma_done;
2130
2131 VDBG(dev, "ep%d irq\n", ep_ix);
2132 ep = &dev->ep[ep_ix];
2133
2134 tmp = readl(&ep->regs->sts);
2135 if (use_dma) {
2136 /* BNA event ? */
2137 if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
5647a149 2138 DBG(dev, "BNA ep%dout occurred - DESPTR = %x\n",
55d402d8
TD
2139 ep->num, readl(&ep->regs->desptr));
2140 /* clear BNA */
2141 writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
2142 if (!ep->cancel_transfer)
2143 ep->bna_occurred = 1;
2144 else
2145 ep->cancel_transfer = 0;
2146 ret_val = IRQ_HANDLED;
2147 goto finished;
2148 }
2149 }
2150 /* HE event ? */
2151 if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
25985edc 2152 dev_err(&dev->pdev->dev, "HE ep%dout occurred\n", ep->num);
55d402d8
TD
2153
2154 /* clear HE */
2155 writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
2156 ret_val = IRQ_HANDLED;
2157 goto finished;
2158 }
2159
2160 if (!list_empty(&ep->queue)) {
2161
2162 /* next request */
2163 req = list_entry(ep->queue.next,
2164 struct udc_request, queue);
2165 } else {
2166 req = NULL;
2167 udc_rxfifo_pending = 1;
2168 }
2169 VDBG(dev, "req = %p\n", req);
2170 /* fifo mode */
2171 if (!use_dma) {
2172
2173 /* read fifo */
2174 if (req && udc_rxfifo_read(ep, req)) {
2175 ret_val = IRQ_HANDLED;
2176
2177 /* finish */
2178 complete_req(ep, req, 0);
2179 /* next request */
2180 if (!list_empty(&ep->queue) && !ep->halted) {
2181 req = list_entry(ep->queue.next,
2182 struct udc_request, queue);
2183 } else
2184 req = NULL;
2185 }
2186
2187 /* DMA */
1b701508 2188 } else if (!ep->cancel_transfer && req) {
55d402d8
TD
2189 ret_val = IRQ_HANDLED;
2190
2191 /* check for DMA done */
2192 if (!use_dma_ppb) {
2193 dma_done = AMD_GETBITS(req->td_data->status,
2194 UDC_DMA_OUT_STS_BS);
2195 /* packet per buffer mode - rx bytes */
2196 } else {
2197 /*
2198 * if BNA occurred then recover desc. from
2199 * BNA dummy desc.
2200 */
2201 if (ep->bna_occurred) {
2202 VDBG(dev, "Recover desc. from BNA dummy\n");
2203 memcpy(req->td_data, ep->bna_dummy_req->td_data,
2204 sizeof(struct udc_data_dma));
2205 ep->bna_occurred = 0;
2206 udc_init_bna_dummy(ep->req);
2207 }
2208 td = udc_get_last_dma_desc(req);
2209 dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
2210 }
2211 if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
2212 /* buffer fill mode - rx bytes */
2213 if (!use_dma_ppb) {
2214 /* received number bytes */
2215 count = AMD_GETBITS(req->td_data->status,
2216 UDC_DMA_OUT_STS_RXBYTES);
2217 VDBG(dev, "rx bytes=%u\n", count);
2218 /* packet per buffer mode - rx bytes */
2219 } else {
2220 VDBG(dev, "req->td_data=%p\n", req->td_data);
2221 VDBG(dev, "last desc = %p\n", td);
2222 /* received number bytes */
2223 if (use_dma_ppb_du) {
2224 /* every desc. counts bytes */
2225 count = udc_get_ppbdu_rxbytes(req);
2226 } else {
2227 /* last desc. counts bytes */
2228 count = AMD_GETBITS(td->status,
2229 UDC_DMA_OUT_STS_RXBYTES);
2230 if (!count && req->req.length
2231 == UDC_DMA_MAXPACKET) {
2232 /*
2233 * on 64k packets the RXBYTES
2234 * field is zero
2235 */
2236 count = UDC_DMA_MAXPACKET;
2237 }
2238 }
2239 VDBG(dev, "last desc rx bytes=%u\n", count);
2240 }
2241
2242 tmp = req->req.length - req->req.actual;
2243 if (count > tmp) {
2244 if ((tmp % ep->ep.maxpacket) != 0) {
2245 DBG(dev, "%s: rx %db, space=%db\n",
2246 ep->ep.name, count, tmp);
2247 req->req.status = -EOVERFLOW;
2248 }
2249 count = tmp;
2250 }
2251 req->req.actual += count;
2252 req->dma_going = 0;
2253 /* complete request */
2254 complete_req(ep, req, 0);
2255
2256 /* next request */
2257 if (!list_empty(&ep->queue) && !ep->halted) {
2258 req = list_entry(ep->queue.next,
2259 struct udc_request,
2260 queue);
2261 /*
2262 * DMA may be already started by udc_queue()
2263 * called by gadget drivers completion
2264 * routine. This happens when queue
2265 * holds one request only.
2266 */
2267 if (req->dma_going == 0) {
2268 /* next dma */
2269 if (prep_dma(ep, req, GFP_ATOMIC) != 0)
2270 goto finished;
2271 /* write desc pointer */
2272 writel(req->td_phys,
2273 &ep->regs->desptr);
2274 req->dma_going = 1;
2275 /* enable DMA */
2276 udc_set_rde(dev);
2277 }
2278 } else {
2279 /*
2280 * implant BNA dummy descriptor to allow
2281 * RXFIFO opening by RDE
2282 */
2283 if (ep->bna_dummy_req) {
2284 /* write desc pointer */
2285 writel(ep->bna_dummy_req->td_phys,
2286 &ep->regs->desptr);
2287 ep->bna_occurred = 0;
2288 }
2289
2290 /*
2291 * schedule timer for setting RDE if queue
2292 * remains empty to allow ep0 packets pass
2293 * through
2294 */
2295 if (set_rde != 0
2296 && !timer_pending(&udc_timer)) {
2297 udc_timer.expires =
2298 jiffies
2299 + HZ*UDC_RDE_TIMER_SECONDS;
2300 set_rde = 1;
170b778f 2301 if (!stop_timer)
55d402d8 2302 add_timer(&udc_timer);
55d402d8
TD
2303 }
2304 if (ep->num != UDC_EP0OUT_IX)
2305 dev->data_ep_queued = 0;
2306 }
2307
2308 } else {
2309 /*
2310 * RX DMA must be reenabled for each desc in PPBDU mode
2311 * and must be enabled for PPBNDU mode in case of BNA
2312 */
2313 udc_set_rde(dev);
2314 }
2315
2316 } else if (ep->cancel_transfer) {
2317 ret_val = IRQ_HANDLED;
2318 ep->cancel_transfer = 0;
2319 }
2320
2321 /* check pending CNAKS */
2322 if (cnak_pending) {
2323 /* CNAk processing when rxfifo empty only */
170b778f 2324 if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
55d402d8 2325 udc_process_cnak_queue(dev);
55d402d8
TD
2326 }
2327
2328 /* clear OUT bits in ep status */
2329 writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);
2330finished:
2331 return ret_val;
2332}
2333
2334/* Interrupt handler for data IN traffic */
2335static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
2336{
2337 irqreturn_t ret_val = IRQ_NONE;
2338 u32 tmp;
2339 u32 epsts;
2340 struct udc_ep *ep;
2341 struct udc_request *req;
2342 struct udc_data_dma *td;
55d402d8
TD
2343 unsigned len;
2344
2345 ep = &dev->ep[ep_ix];
2346
2347 epsts = readl(&ep->regs->sts);
2348 if (use_dma) {
2349 /* BNA ? */
2350 if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
2351 dev_err(&dev->pdev->dev,
5647a149 2352 "BNA ep%din occurred - DESPTR = %08lx\n",
55d402d8
TD
2353 ep->num,
2354 (unsigned long) readl(&ep->regs->desptr));
2355
2356 /* clear BNA */
2357 writel(epsts, &ep->regs->sts);
2358 ret_val = IRQ_HANDLED;
2359 goto finished;
2360 }
2361 }
2362 /* HE event ? */
2363 if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
2364 dev_err(&dev->pdev->dev,
5647a149 2365 "HE ep%dn occurred - DESPTR = %08lx\n",
55d402d8
TD
2366 ep->num, (unsigned long) readl(&ep->regs->desptr));
2367
2368 /* clear HE */
2369 writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
2370 ret_val = IRQ_HANDLED;
2371 goto finished;
2372 }
2373
2374 /* DMA completion */
2375 if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
2376 VDBG(dev, "TDC set- completion\n");
2377 ret_val = IRQ_HANDLED;
2378 if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
2379 req = list_entry(ep->queue.next,
2380 struct udc_request, queue);
058e698b 2381 /*
25985edc 2382 * length bytes transferred
058e698b
JL
2383 * check dma done of last desc. in PPBDU mode
2384 */
2385 if (use_dma_ppb_du) {
2386 td = udc_get_last_dma_desc(req);
872ce511 2387 if (td)
55d402d8 2388 req->req.actual = req->req.length;
058e698b
JL
2389 } else {
2390 /* assume all bytes transferred */
2391 req->req.actual = req->req.length;
2392 }
55d402d8 2393
058e698b
JL
2394 if (req->req.actual == req->req.length) {
2395 /* complete req */
2396 complete_req(ep, req, 0);
2397 req->dma_going = 0;
2398 /* further request available ? */
2399 if (list_empty(&ep->queue)) {
2400 /* disable interrupt */
2401 tmp = readl(&dev->regs->ep_irqmsk);
2402 tmp |= AMD_BIT(ep->num);
2403 writel(tmp, &dev->regs->ep_irqmsk);
55d402d8
TD
2404 }
2405 }
2406 }
2407 ep->cancel_transfer = 0;
2408
2409 }
2410 /*
2411 * status reg has IN bit set and TDC not set (if TDC was handled,
2412 * IN must not be handled (UDC defect) ?
2413 */
2414 if ((epsts & AMD_BIT(UDC_EPSTS_IN))
2415 && !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
2416 ret_val = IRQ_HANDLED;
2417 if (!list_empty(&ep->queue)) {
2418 /* next request */
2419 req = list_entry(ep->queue.next,
2420 struct udc_request, queue);
2421 /* FIFO mode */
2422 if (!use_dma) {
2423 /* write fifo */
2424 udc_txfifo_write(ep, &req->req);
2425 len = req->req.length - req->req.actual;
1435db48
CR
2426 if (len > ep->ep.maxpacket)
2427 len = ep->ep.maxpacket;
2428 req->req.actual += len;
55d402d8
TD
2429 if (req->req.actual == req->req.length
2430 || (len != ep->ep.maxpacket)) {
2431 /* complete req */
2432 complete_req(ep, req, 0);
2433 }
2434 /* DMA */
2435 } else if (req && !req->dma_going) {
2436 VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
2437 req, req->td_data);
2438 if (req->td_data) {
2439
2440 req->dma_going = 1;
2441
2442 /*
2443 * unset L bit of first desc.
2444 * for chain
2445 */
2446 if (use_dma_ppb && req->req.length >
2447 ep->ep.maxpacket) {
2448 req->td_data->status &=
2449 AMD_CLEAR_BIT(
2450 UDC_DMA_IN_STS_L);
2451 }
2452
2453 /* write desc pointer */
2454 writel(req->td_phys, &ep->regs->desptr);
2455
2456 /* set HOST READY */
2457 req->td_data->status =
2458 AMD_ADDBITS(
2459 req->td_data->status,
2460 UDC_DMA_IN_STS_BS_HOST_READY,
2461 UDC_DMA_IN_STS_BS);
2462
2463 /* set poll demand bit */
2464 tmp = readl(&ep->regs->ctl);
2465 tmp |= AMD_BIT(UDC_EPCTL_P);
2466 writel(tmp, &ep->regs->ctl);
2467 }
2468 }
2469
c5deb832
TD
2470 } else if (!use_dma && ep->in) {
2471 /* disable interrupt */
2472 tmp = readl(
2473 &dev->regs->ep_irqmsk);
2474 tmp |= AMD_BIT(ep->num);
2475 writel(tmp,
2476 &dev->regs->ep_irqmsk);
55d402d8
TD
2477 }
2478 }
2479 /* clear status bits */
2480 writel(epsts, &ep->regs->sts);
2481
2482finished:
2483 return ret_val;
2484
2485}
2486
2487/* Interrupt handler for Control OUT traffic */
2488static irqreturn_t udc_control_out_isr(struct udc *dev)
2489__releases(dev->lock)
2490__acquires(dev->lock)
2491{
2492 irqreturn_t ret_val = IRQ_NONE;
2493 u32 tmp;
2494 int setup_supported;
2495 u32 count;
2496 int set = 0;
2497 struct udc_ep *ep;
2498 struct udc_ep *ep_tmp;
2499
2500 ep = &dev->ep[UDC_EP0OUT_IX];
2501
2502 /* clear irq */
2503 writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);
2504
2505 tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
2506 /* check BNA and clear if set */
2507 if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
2508 VDBG(dev, "ep0: BNA set\n");
2509 writel(AMD_BIT(UDC_EPSTS_BNA),
2510 &dev->ep[UDC_EP0OUT_IX].regs->sts);
2511 ep->bna_occurred = 1;
2512 ret_val = IRQ_HANDLED;
2513 goto finished;
2514 }
2515
2516 /* type of data: SETUP or DATA 0 bytes */
2517 tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
2518 VDBG(dev, "data_typ = %x\n", tmp);
2519
2520 /* setup data */
2521 if (tmp == UDC_EPSTS_OUT_SETUP) {
2522 ret_val = IRQ_HANDLED;
2523
2524 ep->dev->stall_ep0in = 0;
2525 dev->waiting_zlp_ack_ep0in = 0;
2526
2527 /* set NAK for EP0_IN */
2528 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2529 tmp |= AMD_BIT(UDC_EPCTL_SNAK);
2530 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2531 dev->ep[UDC_EP0IN_IX].naking = 1;
2532 /* get setup data */
2533 if (use_dma) {
2534
2535 /* clear OUT bits in ep status */
2536 writel(UDC_EPSTS_OUT_CLEAR,
2537 &dev->ep[UDC_EP0OUT_IX].regs->sts);
2538
2539 setup_data.data[0] =
2540 dev->ep[UDC_EP0OUT_IX].td_stp->data12;
2541 setup_data.data[1] =
2542 dev->ep[UDC_EP0OUT_IX].td_stp->data34;
2543 /* set HOST READY */
2544 dev->ep[UDC_EP0OUT_IX].td_stp->status =
2545 UDC_DMA_STP_STS_BS_HOST_READY;
2546 } else {
2547 /* read fifo */
2548 udc_rxfifo_read_dwords(dev, setup_data.data, 2);
2549 }
2550
2551 /* determine direction of control data */
2552 if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
2553 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
2554 /* enable RDE */
2555 udc_ep0_set_rde(dev);
2556 set = 0;
2557 } else {
2558 dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
2559 /*
2560 * implant BNA dummy descriptor to allow RXFIFO opening
2561 * by RDE
2562 */
2563 if (ep->bna_dummy_req) {
2564 /* write desc pointer */
2565 writel(ep->bna_dummy_req->td_phys,
2566 &dev->ep[UDC_EP0OUT_IX].regs->desptr);
2567 ep->bna_occurred = 0;
2568 }
2569
2570 set = 1;
2571 dev->ep[UDC_EP0OUT_IX].naking = 1;
2572 /*
2573 * setup timer for enabling RDE (to not enable
2574 * RXFIFO DMA for data to early)
2575 */
2576 set_rde = 1;
2577 if (!timer_pending(&udc_timer)) {
2578 udc_timer.expires = jiffies +
2579 HZ/UDC_RDE_TIMER_DIV;
170b778f 2580 if (!stop_timer)
55d402d8 2581 add_timer(&udc_timer);
55d402d8
TD
2582 }
2583 }
2584
2585 /*
2586 * mass storage reset must be processed here because
2587 * next packet may be a CLEAR_FEATURE HALT which would not
2588 * clear the stall bit when no STALL handshake was received
2589 * before (autostall can cause this)
2590 */
2591 if (setup_data.data[0] == UDC_MSCRES_DWORD0
2592 && setup_data.data[1] == UDC_MSCRES_DWORD1) {
2593 DBG(dev, "MSC Reset\n");
2594 /*
2595 * clear stall bits
2596 * only one IN and OUT endpoints are handled
2597 */
2598 ep_tmp = &udc->ep[UDC_EPIN_IX];
2599 udc_set_halt(&ep_tmp->ep, 0);
2600 ep_tmp = &udc->ep[UDC_EPOUT_IX];
2601 udc_set_halt(&ep_tmp->ep, 0);
2602 }
2603
2604 /* call gadget with setup data received */
2605 spin_unlock(&dev->lock);
2606 setup_supported = dev->driver->setup(&dev->gadget,
2607 &setup_data.request);
2608 spin_lock(&dev->lock);
2609
2610 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2611 /* ep0 in returns data (not zlp) on IN phase */
2612 if (setup_supported >= 0 && setup_supported <
2613 UDC_EP0IN_MAXPACKET) {
2614 /* clear NAK by writing CNAK in EP0_IN */
2615 tmp |= AMD_BIT(UDC_EPCTL_CNAK);
2616 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2617 dev->ep[UDC_EP0IN_IX].naking = 0;
2618 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
2619
2620 /* if unsupported request then stall */
2621 } else if (setup_supported < 0) {
2622 tmp |= AMD_BIT(UDC_EPCTL_S);
2623 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2624 } else
2625 dev->waiting_zlp_ack_ep0in = 1;
2626
2627
2628 /* clear NAK by writing CNAK in EP0_OUT */
2629 if (!set) {
2630 tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
2631 tmp |= AMD_BIT(UDC_EPCTL_CNAK);
2632 writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
2633 dev->ep[UDC_EP0OUT_IX].naking = 0;
2634 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
2635 }
2636
2637 if (!use_dma) {
2638 /* clear OUT bits in ep status */
2639 writel(UDC_EPSTS_OUT_CLEAR,
2640 &dev->ep[UDC_EP0OUT_IX].regs->sts);
2641 }
2642
2643 /* data packet 0 bytes */
2644 } else if (tmp == UDC_EPSTS_OUT_DATA) {
2645 /* clear OUT bits in ep status */
2646 writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);
2647
2648 /* get setup data: only 0 packet */
2649 if (use_dma) {
2650 /* no req if 0 packet, just reactivate */
2651 if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
2652 VDBG(dev, "ZLP\n");
2653
2654 /* set HOST READY */
2655 dev->ep[UDC_EP0OUT_IX].td->status =
2656 AMD_ADDBITS(
2657 dev->ep[UDC_EP0OUT_IX].td->status,
2658 UDC_DMA_OUT_STS_BS_HOST_READY,
2659 UDC_DMA_OUT_STS_BS);
2660 /* enable RDE */
2661 udc_ep0_set_rde(dev);
2662 ret_val = IRQ_HANDLED;
2663
2664 } else {
2665 /* control write */
2666 ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
2667 /* re-program desc. pointer for possible ZLPs */
2668 writel(dev->ep[UDC_EP0OUT_IX].td_phys,
2669 &dev->ep[UDC_EP0OUT_IX].regs->desptr);
2670 /* enable RDE */
2671 udc_ep0_set_rde(dev);
2672 }
2673 } else {
2674
2675 /* received number bytes */
2676 count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
2677 count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE);
2678 /* out data for fifo mode not working */
2679 count = 0;
2680
2681 /* 0 packet or real data ? */
2682 if (count != 0) {
2683 ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
2684 } else {
2685 /* dummy read confirm */
2686 readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
2687 ret_val = IRQ_HANDLED;
2688 }
2689 }
2690 }
2691
2692 /* check pending CNAKS */
2693 if (cnak_pending) {
2694 /* CNAk processing when rxfifo empty only */
170b778f 2695 if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
55d402d8 2696 udc_process_cnak_queue(dev);
55d402d8
TD
2697 }
2698
2699finished:
2700 return ret_val;
2701}
2702
2703/* Interrupt handler for Control IN traffic */
2704static irqreturn_t udc_control_in_isr(struct udc *dev)
2705{
2706 irqreturn_t ret_val = IRQ_NONE;
2707 u32 tmp;
2708 struct udc_ep *ep;
2709 struct udc_request *req;
2710 unsigned len;
2711
2712 ep = &dev->ep[UDC_EP0IN_IX];
2713
2714 /* clear irq */
2715 writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts);
2716
2717 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
2718 /* DMA completion */
2719 if (tmp & AMD_BIT(UDC_EPSTS_TDC)) {
5647a149 2720 VDBG(dev, "isr: TDC clear\n");
55d402d8
TD
2721 ret_val = IRQ_HANDLED;
2722
2723 /* clear TDC bit */
2724 writel(AMD_BIT(UDC_EPSTS_TDC),
2725 &dev->ep[UDC_EP0IN_IX].regs->sts);
2726
2727 /* status reg has IN bit set ? */
2728 } else if (tmp & AMD_BIT(UDC_EPSTS_IN)) {
2729 ret_val = IRQ_HANDLED;
2730
2731 if (ep->dma) {
2732 /* clear IN bit */
2733 writel(AMD_BIT(UDC_EPSTS_IN),
2734 &dev->ep[UDC_EP0IN_IX].regs->sts);
2735 }
2736 if (dev->stall_ep0in) {
2737 DBG(dev, "stall ep0in\n");
2738 /* halt ep0in */
2739 tmp = readl(&ep->regs->ctl);
2740 tmp |= AMD_BIT(UDC_EPCTL_S);
2741 writel(tmp, &ep->regs->ctl);
2742 } else {
2743 if (!list_empty(&ep->queue)) {
2744 /* next request */
2745 req = list_entry(ep->queue.next,
2746 struct udc_request, queue);
2747
2748 if (ep->dma) {
2749 /* write desc pointer */
2750 writel(req->td_phys, &ep->regs->desptr);
2751 /* set HOST READY */
2752 req->td_data->status =
2753 AMD_ADDBITS(
2754 req->td_data->status,
2755 UDC_DMA_STP_STS_BS_HOST_READY,
2756 UDC_DMA_STP_STS_BS);
2757
2758 /* set poll demand bit */
2759 tmp =
2760 readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2761 tmp |= AMD_BIT(UDC_EPCTL_P);
2762 writel(tmp,
2763 &dev->ep[UDC_EP0IN_IX].regs->ctl);
2764
2765 /* all bytes will be transferred */
2766 req->req.actual = req->req.length;
2767
2768 /* complete req */
2769 complete_req(ep, req, 0);
2770
2771 } else {
2772 /* write fifo */
2773 udc_txfifo_write(ep, &req->req);
2774
25985edc 2775 /* lengh bytes transferred */
55d402d8
TD
2776 len = req->req.length - req->req.actual;
2777 if (len > ep->ep.maxpacket)
2778 len = ep->ep.maxpacket;
2779
2780 req->req.actual += len;
2781 if (req->req.actual == req->req.length
2782 || (len != ep->ep.maxpacket)) {
2783 /* complete req */
2784 complete_req(ep, req, 0);
2785 }
2786 }
2787
2788 }
2789 }
2790 ep->halted = 0;
2791 dev->stall_ep0in = 0;
2792 if (!ep->dma) {
2793 /* clear IN bit */
2794 writel(AMD_BIT(UDC_EPSTS_IN),
2795 &dev->ep[UDC_EP0IN_IX].regs->sts);
2796 }
2797 }
2798
2799 return ret_val;
2800}
2801
2802
2803/* Interrupt handler for global device events */
2804static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq)
2805__releases(dev->lock)
2806__acquires(dev->lock)
2807{
2808 irqreturn_t ret_val = IRQ_NONE;
2809 u32 tmp;
2810 u32 cfg;
2811 struct udc_ep *ep;
2812 u16 i;
2813 u8 udc_csr_epix;
2814
2815 /* SET_CONFIG irq ? */
2816 if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) {
2817 ret_val = IRQ_HANDLED;
2818
2819 /* read config value */
2820 tmp = readl(&dev->regs->sts);
2821 cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG);
2822 DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg);
2823 dev->cur_config = cfg;
2824 dev->set_cfg_not_acked = 1;
2825
2826 /* make usb request for gadget driver */
2827 memset(&setup_data, 0 , sizeof(union udc_setup_data));
2828 setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
fd05e720 2829 setup_data.request.wValue = cpu_to_le16(dev->cur_config);
55d402d8
TD
2830
2831 /* programm the NE registers */
2832 for (i = 0; i < UDC_EP_NUM; i++) {
2833 ep = &dev->ep[i];
2834 if (ep->in) {
2835
2836 /* ep ix in UDC CSR register space */
2837 udc_csr_epix = ep->num;
2838
2839
2840 /* OUT ep */
2841 } else {
2842 /* ep ix in UDC CSR register space */
2843 udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
2844 }
2845
2846 tmp = readl(&dev->csr->ne[udc_csr_epix]);
2847 /* ep cfg */
2848 tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
2849 UDC_CSR_NE_CFG);
2850 /* write reg */
2851 writel(tmp, &dev->csr->ne[udc_csr_epix]);
2852
2853 /* clear stall bits */
2854 ep->halted = 0;
2855 tmp = readl(&ep->regs->ctl);
2856 tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
2857 writel(tmp, &ep->regs->ctl);
2858 }
2859 /* call gadget zero with setup data received */
2860 spin_unlock(&dev->lock);
2861 tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
2862 spin_lock(&dev->lock);
2863
2864 } /* SET_INTERFACE ? */
2865 if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) {
2866 ret_val = IRQ_HANDLED;
2867
2868 dev->set_cfg_not_acked = 1;
2869 /* read interface and alt setting values */
2870 tmp = readl(&dev->regs->sts);
2871 dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT);
2872 dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF);
2873
2874 /* make usb request for gadget driver */
2875 memset(&setup_data, 0 , sizeof(union udc_setup_data));
2876 setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
2877 setup_data.request.bRequestType = USB_RECIP_INTERFACE;
fd05e720
AV
2878 setup_data.request.wValue = cpu_to_le16(dev->cur_alt);
2879 setup_data.request.wIndex = cpu_to_le16(dev->cur_intf);
55d402d8
TD
2880
2881 DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
2882 dev->cur_alt, dev->cur_intf);
2883
2884 /* programm the NE registers */
2885 for (i = 0; i < UDC_EP_NUM; i++) {
2886 ep = &dev->ep[i];
2887 if (ep->in) {
2888
2889 /* ep ix in UDC CSR register space */
2890 udc_csr_epix = ep->num;
2891
2892
2893 /* OUT ep */
2894 } else {
2895 /* ep ix in UDC CSR register space */
2896 udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
2897 }
2898
2899 /* UDC CSR reg */
2900 /* set ep values */
2901 tmp = readl(&dev->csr->ne[udc_csr_epix]);
2902 /* ep interface */
2903 tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
2904 UDC_CSR_NE_INTF);
2905 /* tmp = AMD_ADDBITS(tmp, 2, UDC_CSR_NE_INTF); */
2906 /* ep alt */
2907 tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
2908 UDC_CSR_NE_ALT);
2909 /* write reg */
2910 writel(tmp, &dev->csr->ne[udc_csr_epix]);
2911
2912 /* clear stall bits */
2913 ep->halted = 0;
2914 tmp = readl(&ep->regs->ctl);
2915 tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
2916 writel(tmp, &ep->regs->ctl);
2917 }
2918
2919 /* call gadget zero with setup data received */
2920 spin_unlock(&dev->lock);
2921 tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
2922 spin_lock(&dev->lock);
2923
2924 } /* USB reset */
2925 if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
2926 DBG(dev, "USB Reset interrupt\n");
2927 ret_val = IRQ_HANDLED;
2928
2929 /* allow soft reset when suspend occurs */
2930 soft_reset_occured = 0;
2931
2932 dev->waiting_zlp_ack_ep0in = 0;
2933 dev->set_cfg_not_acked = 0;
2934
2935 /* mask not needed interrupts */
2936 udc_mask_unused_interrupts(dev);
2937
2938 /* call gadget to resume and reset configs etc. */
2939 spin_unlock(&dev->lock);
2940 if (dev->sys_suspended && dev->driver->resume) {
2941 dev->driver->resume(&dev->gadget);
2942 dev->sys_suspended = 0;
2943 }
107d13c7 2944 usb_gadget_udc_reset(&dev->gadget, dev->driver);
55d402d8
TD
2945 spin_lock(&dev->lock);
2946
2947 /* disable ep0 to empty req queue */
2948 empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
2949 ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
2950
2951 /* soft reset when rxfifo not empty */
2952 tmp = readl(&dev->regs->sts);
2953 if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
2954 && !soft_reset_after_usbreset_occured) {
2955 udc_soft_reset(dev);
2956 soft_reset_after_usbreset_occured++;
2957 }
2958
2959 /*
2960 * DMA reset to kill potential old DMA hw hang,
2961 * POLL bit is already reset by ep_init() through
2962 * disconnect()
2963 */
2964 DBG(dev, "DMA machine reset\n");
2965 tmp = readl(&dev->regs->cfg);
2966 writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
2967 writel(tmp, &dev->regs->cfg);
2968
2969 /* put into initial config */
2970 udc_basic_init(dev);
2971
2972 /* enable device setup interrupts */
2973 udc_enable_dev_setup_interrupts(dev);
2974
2975 /* enable suspend interrupt */
2976 tmp = readl(&dev->regs->irqmsk);
2977 tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
2978 writel(tmp, &dev->regs->irqmsk);
2979
2980 } /* USB suspend */
2981 if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
2982 DBG(dev, "USB Suspend interrupt\n");
2983 ret_val = IRQ_HANDLED;
2984 if (dev->driver->suspend) {
2985 spin_unlock(&dev->lock);
2986 dev->sys_suspended = 1;
2987 dev->driver->suspend(&dev->gadget);
2988 spin_lock(&dev->lock);
2989 }
2990 } /* new speed ? */
2991 if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
2992 DBG(dev, "ENUM interrupt\n");
2993 ret_val = IRQ_HANDLED;
2994 soft_reset_after_usbreset_occured = 0;
2995
2996 /* disable ep0 to empty req queue */
2997 empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
2998 ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
2999
3000 /* link up all endpoints */
3001 udc_setup_endpoints(dev);
e538dfda
MN
3002 dev_info(&dev->pdev->dev, "Connect: %s\n",
3003 usb_speed_string(dev->gadget.speed));
55d402d8
TD
3004
3005 /* init ep 0 */
3006 activate_control_endpoints(dev);
3007
3008 /* enable ep0 interrupts */
3009 udc_enable_ep0_interrupts(dev);
3010 }
3011 /* session valid change interrupt */
3012 if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
3013 DBG(dev, "USB SVC interrupt\n");
3014 ret_val = IRQ_HANDLED;
3015
3016 /* check that session is not valid to detect disconnect */
3017 tmp = readl(&dev->regs->sts);
3018 if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
3019 /* disable suspend interrupt */
3020 tmp = readl(&dev->regs->irqmsk);
3021 tmp |= AMD_BIT(UDC_DEVINT_US);
3022 writel(tmp, &dev->regs->irqmsk);
3023 DBG(dev, "USB Disconnect (session valid low)\n");
3024 /* cleanup on disconnect */
3025 usb_disconnect(udc);
3026 }
3027
3028 }
3029
3030 return ret_val;
3031}
3032
3033/* Interrupt Service Routine, see Linux Kernel Doc for parameters */
3034static irqreturn_t udc_irq(int irq, void *pdev)
3035{
3036 struct udc *dev = pdev;
3037 u32 reg;
3038 u16 i;
3039 u32 ep_irq;
3040 irqreturn_t ret_val = IRQ_NONE;
3041
3042 spin_lock(&dev->lock);
3043
3044 /* check for ep irq */
3045 reg = readl(&dev->regs->ep_irqsts);
3046 if (reg) {
3047 if (reg & AMD_BIT(UDC_EPINT_OUT_EP0))
3048 ret_val |= udc_control_out_isr(dev);
3049 if (reg & AMD_BIT(UDC_EPINT_IN_EP0))
3050 ret_val |= udc_control_in_isr(dev);
3051
3052 /*
3053 * data endpoint
3054 * iterate ep's
3055 */
3056 for (i = 1; i < UDC_EP_NUM; i++) {
3057 ep_irq = 1 << i;
3058 if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0)
3059 continue;
3060
3061 /* clear irq status */
3062 writel(ep_irq, &dev->regs->ep_irqsts);
3063
3064 /* irq for out ep ? */
3065 if (i > UDC_EPIN_NUM)
3066 ret_val |= udc_data_out_isr(dev, i);
3067 else
3068 ret_val |= udc_data_in_isr(dev, i);
3069 }
3070
3071 }
3072
3073
3074 /* check for dev irq */
3075 reg = readl(&dev->regs->irqsts);
3076 if (reg) {
3077 /* clear irq */
3078 writel(reg, &dev->regs->irqsts);
3079 ret_val |= udc_dev_isr(dev, reg);
3080 }
3081
3082
3083 spin_unlock(&dev->lock);
3084 return ret_val;
3085}
3086
/* Tears down device: frees the udc struct when the gadget dev is released */
static void gadget_release(struct device *pdev)
{
	struct amd5536udc *dev = dev_get_drvdata(pdev);

	kfree(dev);
}
3093
3094/* Cleanup on device remove */
3095static void udc_remove(struct udc *dev)
3096{
3097 /* remove timer */
3098 stop_timer++;
3099 if (timer_pending(&udc_timer))
3100 wait_for_completion(&on_exit);
3101 if (udc_timer.data)
3102 del_timer_sync(&udc_timer);
3103 /* remove pollstall timer */
3104 stop_pollstall_timer++;
3105 if (timer_pending(&udc_pollstall_timer))
3106 wait_for_completion(&on_pollstall_exit);
3107 if (udc_pollstall_timer.data)
3108 del_timer_sync(&udc_pollstall_timer);
3109 udc = NULL;
3110}
3111
580693bb
SM
3112/* free all the dma pools */
3113static void free_dma_pools(struct udc *dev)
3114{
3115 dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td,
3116 dev->ep[UDC_EP0OUT_IX].td_phys);
3117 dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td_stp,
3118 dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3119 dma_pool_destroy(dev->stp_requests);
3120 dma_pool_destroy(dev->data_requests);
3121}
3122
55d402d8
TD
3123/* Reset all pci context */
3124static void udc_pci_remove(struct pci_dev *pdev)
3125{
3126 struct udc *dev;
3127
3128 dev = pci_get_drvdata(pdev);
3129
0f91349b 3130 usb_del_gadget_udc(&udc->gadget);
55d402d8 3131 /* gadget driver must not be registered */
2e1b7d0c
SM
3132 if (WARN_ON(dev->driver))
3133 return;
55d402d8
TD
3134
3135 /* dma pool cleanup */
f349dd3c 3136 free_dma_pools(dev);
55d402d8
TD
3137
3138 /* reset controller */
3139 writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
76c3727d
SM
3140 free_irq(pdev->irq, dev);
3141 iounmap(dev->virt_addr);
3142 release_mem_region(pci_resource_start(pdev, 0),
3143 pci_resource_len(pdev, 0));
3144 pci_disable_device(pdev);
55d402d8 3145
55d402d8
TD
3146 udc_remove(dev);
3147}
3148
3149/* create dma pools on init */
3150static int init_dma_pools(struct udc *dev)
3151{
3152 struct udc_stp_dma *td_stp;
3153 struct udc_data_dma *td_data;
3154 int retval;
3155
3156 /* consistent DMA mode setting ? */
3157 if (use_dma_ppb) {
3158 use_dma_bufferfill_mode = 0;
3159 } else {
3160 use_dma_ppb_du = 0;
3161 use_dma_bufferfill_mode = 1;
3162 }
3163
3164 /* DMA setup */
3165 dev->data_requests = dma_pool_create("data_requests", NULL,
3166 sizeof(struct udc_data_dma), 0, 0);
3167 if (!dev->data_requests) {
3168 DBG(dev, "can't get request data pool\n");
14a37ec6 3169 return -ENOMEM;
55d402d8
TD
3170 }
3171
3172 /* EP0 in dma regs = dev control regs */
3173 dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;
3174
3175 /* dma desc for setup data */
3176 dev->stp_requests = dma_pool_create("setup requests", NULL,
3177 sizeof(struct udc_stp_dma), 0, 0);
3178 if (!dev->stp_requests) {
3179 DBG(dev, "can't get stp request pool\n");
3180 retval = -ENOMEM;
14a37ec6 3181 goto err_create_dma_pool;
55d402d8
TD
3182 }
3183 /* setup */
3184 td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
3185 &dev->ep[UDC_EP0OUT_IX].td_stp_dma);
1b701508 3186 if (!td_stp) {
55d402d8 3187 retval = -ENOMEM;
14a37ec6 3188 goto err_alloc_dma;
55d402d8
TD
3189 }
3190 dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;
3191
3192 /* data: 0 packets !? */
3193 td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
3194 &dev->ep[UDC_EP0OUT_IX].td_phys);
1b701508 3195 if (!td_data) {
55d402d8 3196 retval = -ENOMEM;
14a37ec6 3197 goto err_alloc_phys;
55d402d8
TD
3198 }
3199 dev->ep[UDC_EP0OUT_IX].td = td_data;
3200 return 0;
3201
14a37ec6
SM
3202err_alloc_phys:
3203 dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td_stp,
3204 dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3205err_alloc_dma:
3206 dma_pool_destroy(dev->stp_requests);
3207 dev->stp_requests = NULL;
3208err_create_dma_pool:
3209 dma_pool_destroy(dev->data_requests);
3210 dev->data_requests = NULL;
55d402d8
TD
3211 return retval;
3212}
3213
4f06b6bb
SM
3214/* general probe */
3215static int udc_probe(struct udc *dev)
3216{
3217 char tmp[128];
3218 u32 reg;
3219 int retval;
3220
3221 /* mark timer as not initialized */
3222 udc_timer.data = 0;
3223 udc_pollstall_timer.data = 0;
3224
3225 /* device struct setup */
3226 dev->gadget.ops = &udc_ops;
3227
3228 dev_set_name(&dev->gadget.dev, "gadget");
3229 dev->gadget.name = name;
3230 dev->gadget.max_speed = USB_SPEED_HIGH;
3231
3232 /* init registers, interrupts, ... */
3233 startup_registers(dev);
3234
3235 dev_info(&dev->pdev->dev, "%s\n", mod_desc);
3236
3237 snprintf(tmp, sizeof(tmp), "%d", dev->irq);
3238 dev_info(&dev->pdev->dev,
3239 "irq %s, pci mem %08lx, chip rev %02x(Geode5536 %s)\n",
3240 tmp, dev->phys_addr, dev->chiprev,
3241 (dev->chiprev == UDC_HSA0_REV) ? "A0" : "B1");
3242 strcpy(tmp, UDC_DRIVER_VERSION_STRING);
3243 if (dev->chiprev == UDC_HSA0_REV) {
3244 dev_err(&dev->pdev->dev, "chip revision is A0; too old\n");
3245 retval = -ENODEV;
3246 goto finished;
3247 }
3248 dev_info(&dev->pdev->dev,
3249 "driver version: %s(for Geode5536 B1)\n", tmp);
3250 udc = dev;
3251
3252 retval = usb_add_gadget_udc_release(&udc->pdev->dev, &dev->gadget,
3253 gadget_release);
3254 if (retval)
3255 goto finished;
3256
3257 /* timer init */
3258 init_timer(&udc_timer);
3259 udc_timer.function = udc_timer_function;
3260 udc_timer.data = 1;
3261 /* timer pollstall init */
3262 init_timer(&udc_pollstall_timer);
3263 udc_pollstall_timer.function = udc_pollstall_timer_function;
3264 udc_pollstall_timer.data = 1;
3265
3266 /* set SD */
3267 reg = readl(&dev->regs->ctl);
3268 reg |= AMD_BIT(UDC_DEVCTL_SD);
3269 writel(reg, &dev->regs->ctl);
3270
3271 /* print dev register info */
3272 print_regs(dev);
3273
3274 return 0;
3275
3276finished:
3277 return retval;
3278}
3279
55d402d8
TD
3280/* Called by pci bus driver to init pci context */
3281static int udc_pci_probe(
3282 struct pci_dev *pdev,
3283 const struct pci_device_id *id
3284)
3285{
3286 struct udc *dev;
3287 unsigned long resource;
3288 unsigned long len;
3289 int retval = 0;
3290
3291 /* one udc only */
3292 if (udc) {
3293 dev_dbg(&pdev->dev, "already probed\n");
3294 return -EBUSY;
3295 }
3296
3297 /* init */
3298 dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
6527cc27
AK
3299 if (!dev)
3300 return -ENOMEM;
55d402d8
TD
3301
3302 /* pci setup */
3303 if (pci_enable_device(pdev) < 0) {
3304 retval = -ENODEV;
6527cc27 3305 goto err_pcidev;
55d402d8 3306 }
55d402d8
TD
3307
3308 /* PCI resource allocation */
3309 resource = pci_resource_start(pdev, 0);
3310 len = pci_resource_len(pdev, 0);
3311
3312 if (!request_mem_region(resource, len, name)) {
3313 dev_dbg(&pdev->dev, "pci device used already\n");
3314 retval = -EBUSY;
6527cc27 3315 goto err_memreg;
55d402d8 3316 }
55d402d8
TD
3317
3318 dev->virt_addr = ioremap_nocache(resource, len);
1b701508 3319 if (!dev->virt_addr) {
55d402d8
TD
3320 dev_dbg(&pdev->dev, "start address cannot be mapped\n");
3321 retval = -EFAULT;
6527cc27 3322 goto err_ioremap;
55d402d8
TD
3323 }
3324
3325 if (!pdev->irq) {
25e14c1f 3326 dev_err(&pdev->dev, "irq not set\n");
55d402d8 3327 retval = -ENODEV;
6527cc27 3328 goto err_irq;
55d402d8
TD
3329 }
3330
c5deb832
TD
3331 spin_lock_init(&dev->lock);
3332 /* udc csr registers base */
3333 dev->csr = dev->virt_addr + UDC_CSR_ADDR;
3334 /* dev registers base */
3335 dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;
3336 /* ep registers base */
3337 dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR;
3338 /* fifo's base */
3339 dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR);
3340 dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);
3341
55d402d8 3342 if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
25e14c1f 3343 dev_dbg(&pdev->dev, "request_irq(%d) fail\n", pdev->irq);
55d402d8 3344 retval = -EBUSY;
6527cc27 3345 goto err_irq;
55d402d8 3346 }
55d402d8
TD
3347
3348 pci_set_drvdata(pdev, dev);
3349
1d3ee41e
AK
3350 /* chip revision for Hs AMD5536 */
3351 dev->chiprev = pdev->revision;
55d402d8
TD
3352
3353 pci_set_master(pdev);
51745281 3354 pci_try_set_mwi(pdev);
55d402d8 3355
55d402d8
TD
3356 /* init dma pools */
3357 if (use_dma) {
3358 retval = init_dma_pools(dev);
3359 if (retval != 0)
580693bb 3360 goto err_dma;
55d402d8
TD
3361 }
3362
3363 dev->phys_addr = resource;
3364 dev->irq = pdev->irq;
3365 dev->pdev = pdev;
55d402d8
TD
3366
3367 /* general probing */
580693bb
SM
3368 if (udc_probe(dev)) {
3369 retval = -ENODEV;
3370 goto err_probe;
3371 }
3372 return 0;
6527cc27 3373
580693bb
SM
3374err_probe:
3375 if (use_dma)
3376 free_dma_pools(dev);
3377err_dma:
3378 free_irq(pdev->irq, dev);
6527cc27
AK
3379err_irq:
3380 iounmap(dev->virt_addr);
3381err_ioremap:
3382 release_mem_region(resource, len);
3383err_memreg:
3384 pci_disable_device(pdev);
3385err_pcidev:
3386 kfree(dev);
55d402d8
TD
3387 return retval;
3388}
3389
55d402d8 3390/* PCI device parameters */
9510ecee 3391static const struct pci_device_id pci_id[] = {
55d402d8
TD
3392 {
3393 PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096),
7b78f48a 3394 .class = PCI_CLASS_SERIAL_USB_DEVICE,
55d402d8
TD
3395 .class_mask = 0xffffffff,
3396 },
3397 {},
3398};
3399MODULE_DEVICE_TABLE(pci, pci_id);
3400
3401/* PCI functions */
3402static struct pci_driver udc_pci_driver = {
3403 .name = (char *) name,
3404 .id_table = pci_id,
3405 .probe = udc_pci_probe,
3406 .remove = udc_pci_remove,
3407};
3408
3cdb7721 3409module_pci_driver(udc_pci_driver);
55d402d8
TD
3410
3411MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
3412MODULE_AUTHOR("Thomas Dahlmann");
3413MODULE_LICENSE("GPL");