usb: gadget: amd5536udc: remove forward declaration of udc_pci_*
drivers/usb/gadget/udc/amd5536udc.c (linux-2.6-block.git)

/*
 * amd5536udc.c -- AMD 5536 UDC high/full speed USB device controller
 *
 * Copyright (C) 2005-2007 AMD (http://www.amd.com)
 * Author: Thomas Dahlmann
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

/*
 * The AMD5536 UDC is part of the x86 southbridge AMD Geode CS5536.
 * It is a USB Highspeed DMA capable USB device controller. Besides ep0
 * it provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
 *
 * Make sure that UDC is assigned to port 4 by BIOS settings (the port can
 * also be used as a host port) and that the UOC bits PAD_EN and APU are set
 * (should be done by BIOS init).
 *
 * UDC DMA requires 32-bit aligned buffers, so DMA with gadget ether does
 * not work without updating NET_IP_ALIGN. Alternatively, PIO mode (module
 * param "use_dma=0") can be used with gadget ether.
 */

/* debug control */
/* #define UDC_VERBOSE */

/* Driver strings */
#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
#define UDC_DRIVER_VERSION_STRING	"01.00.0206"

/* system */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/dmapool.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/prefetch.h>

#include <asm/byteorder.h>
#include <asm/unaligned.h>

/* gadget stack */
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

/* udc specific */
#include "amd5536udc.h"
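
/*
 * Note: the AMD_BIT(), AMD_ADDBITS(), AMD_GETBITS() and AMD_UNMASK_BIT()
 * macros used throughout below are register bitfield helpers provided by
 * amd5536udc.h.
 */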


static void udc_tasklet_disconnect(unsigned long);
static void empty_req_queue(struct udc_ep *);
static void udc_basic_init(struct udc *dev);
static void udc_setup_endpoints(struct udc *dev);
static void udc_soft_reset(struct udc *dev);
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);

/* description */
static const char mod_desc[] = UDC_MOD_DESCRIPTION;
static const char name[] = "amd5536udc";

/* structure to hold endpoint function pointers */
static const struct usb_ep_ops udc_ep_ops;

/* received setup data */
static union udc_setup_data setup_data;

/* pointer to device object */
static struct udc *udc;

/* irq spin lock for soft reset */
static DEFINE_SPINLOCK(udc_irq_spinlock);
/* stall spin lock */
static DEFINE_SPINLOCK(udc_stall_spinlock);

/*
 * slave mode: pending bytes in RX fifo after nyet,
 * used if EPIN irq came but no req was available
 */
static unsigned int udc_rxfifo_pending;

/* count soft resets after suspend to avoid loop */
static int soft_reset_occurred;
static int soft_reset_after_usbreset_occurred;

/* timer */
static struct timer_list udc_timer;
static int stop_timer;

/*
 * set_rde -- Is used to control enabling of RX DMA. Problem is
 * that UDC has only one bit (RDE) to enable/disable RX DMA for
 * all OUT endpoints. So we have to handle race conditions like
 * when OUT data reaches the fifo but no request was queued yet.
 * This cannot be solved by leaving the RX DMA disabled until a
 * request gets queued because there may be other OUT packets
 * in the FIFO (important for not blocking control traffic).
 * The value of set_rde controls the corresponding timer.
 *
 * set_rde -1 == not used, means it is allowed to be set to 0 or 1
 * set_rde  0 == do not touch RDE, do not start the RDE timer
 * set_rde  1 == timer function will look whether FIFO has data
 * set_rde  2 == set by timer function to enable RX DMA on next call
 */
static int set_rde = -1;

static DECLARE_COMPLETION(on_exit);
static struct timer_list udc_pollstall_timer;
static int stop_pollstall_timer;
static DECLARE_COMPLETION(on_pollstall_exit);

/* tasklet for usb disconnect */
static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect,
		(unsigned long) &udc);


/* endpoint names used for print */
static const char ep0_string[] = "ep0in";
static const struct {
	const char *name;
	const struct usb_ep_caps caps;
} ep_info[] = {
#define EP_INFO(_name, _caps) \
	{ \
		.name = _name, \
		.caps = _caps, \
	}

	EP_INFO(ep0_string,
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep1in-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep2in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep3in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep4in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep5in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep6in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep7in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep8in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep9in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep10in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep11in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep12in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep13in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep14in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep15in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep0out",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep1out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep2out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep3out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep4out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep5out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep6out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep7out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep8out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep9out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep10out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep11out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep12out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep13out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep14out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep15out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),

#undef EP_INFO
};
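
/*
 * Note: the table above models the register file's 16 endpoint slots per
 * direction; on the CS5536 only ep0 plus four IN and four OUT endpoints
 * are actually usable (see the comment at the top of this file).
 */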

/* DMA usage flag */
static bool use_dma = 1;
/* packet per buffer dma */
static bool use_dma_ppb = 1;
/* with per descr. update */
static bool use_dma_ppb_du;
/* buffer fill mode */
static int use_dma_bufferfill_mode;
/* full speed only mode */
static bool use_fullspeed;
/* tx buffer size for high speed */
static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;

/* module parameters */
module_param(use_dma, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma, "true for DMA");
module_param(use_dma_ppb, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma_ppb, "true for DMA in packet per buffer mode");
module_param(use_dma_ppb_du, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma_ppb_du,
	"true for DMA in packet per buffer mode with descriptor update");
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
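
/*
 * Example (hypothetical invocation): to run gadget ether in PIO mode, as
 * suggested in the note at the top of this file, the module could be
 * loaded as:
 *
 *	modprobe amd5536udc use_dma=0
 */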

/*---------------------------------------------------------------------------*/
/* Prints UDC device registers and endpoint irq registers */
static void print_regs(struct udc *dev)
{
	DBG(dev, "------- Device registers -------\n");
	DBG(dev, "dev config = %08x\n", readl(&dev->regs->cfg));
	DBG(dev, "dev control = %08x\n", readl(&dev->regs->ctl));
	DBG(dev, "dev status = %08x\n", readl(&dev->regs->sts));
	DBG(dev, "\n");
	DBG(dev, "dev int's = %08x\n", readl(&dev->regs->irqsts));
	DBG(dev, "dev intmask = %08x\n", readl(&dev->regs->irqmsk));
	DBG(dev, "\n");
	DBG(dev, "dev ep int's = %08x\n", readl(&dev->regs->ep_irqsts));
	DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk));
	DBG(dev, "\n");
	DBG(dev, "USE DMA = %d\n", use_dma);
	if (use_dma && use_dma_ppb && !use_dma_ppb_du) {
		DBG(dev, "DMA mode = PPBNDU (packet per buffer "
			"WITHOUT desc. update)\n");
		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBNDU");
	} else if (use_dma && use_dma_ppb && use_dma_ppb_du) {
		DBG(dev, "DMA mode = PPBDU (packet per buffer "
			"WITH desc. update)\n");
		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBDU");
	}
	if (use_dma && use_dma_bufferfill_mode) {
		DBG(dev, "DMA mode = BF (buffer fill mode)\n");
		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "BF");
	}
	if (!use_dma)
		dev_info(&dev->pdev->dev, "FIFO mode\n");
	DBG(dev, "-------------------------------------------------------\n");
}

/* Masks unused interrupts */
static int udc_mask_unused_interrupts(struct udc *dev)
{
	u32 tmp;

	/* mask all dev interrupts */
	tmp = AMD_BIT(UDC_DEVINT_SVC) |
		AMD_BIT(UDC_DEVINT_ENUM) |
		AMD_BIT(UDC_DEVINT_US) |
		AMD_BIT(UDC_DEVINT_UR) |
		AMD_BIT(UDC_DEVINT_ES) |
		AMD_BIT(UDC_DEVINT_SI) |
		AMD_BIT(UDC_DEVINT_SOF) |
		AMD_BIT(UDC_DEVINT_SC);
	writel(tmp, &dev->regs->irqmsk);

	/* mask all ep interrupts */
	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk);

	return 0;
}

/* Enables endpoint 0 interrupts */
static int udc_enable_ep0_interrupts(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "udc_enable_ep0_interrupts()\n");

	/* read irq mask */
	tmp = readl(&dev->regs->ep_irqmsk);
	/* enable ep0 irq's */
	tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0)
		& AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0);
	writel(tmp, &dev->regs->ep_irqmsk);

	return 0;
}

/* Enables device interrupts for SET_INTF and SET_CONFIG */
static int udc_enable_dev_setup_interrupts(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "enable device interrupts for setup data\n");

	/* read irq mask */
	tmp = readl(&dev->regs->irqmsk);

	/* enable SET_INTERFACE, SET_CONFIG and other needed irq's */
	tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI)
		& AMD_UNMASK_BIT(UDC_DEVINT_SC)
		& AMD_UNMASK_BIT(UDC_DEVINT_UR)
		& AMD_UNMASK_BIT(UDC_DEVINT_SVC)
		& AMD_UNMASK_BIT(UDC_DEVINT_ENUM);
	writel(tmp, &dev->regs->irqmsk);

	return 0;
}

/* Calculates fifo start of endpoint based on preceding endpoints */
static int udc_set_txfifo_addr(struct udc_ep *ep)
{
	struct udc *dev;
	u32 tmp;
	int i;

	if (!ep || !(ep->in))
		return -EINVAL;

	dev = ep->dev;
	ep->txfifo = dev->txfifo;

	/* traverse ep's */
	for (i = 0; i < ep->num; i++) {
		if (dev->ep[i].regs) {
			/* read fifo size */
			tmp = readl(&dev->ep[i].regs->bufin_framenum);
			tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE);
			ep->txfifo += tmp;
		}
	}
	return 0;
}

/* CNAK pending field: bit0 = ep0in, bit16 = ep0out */
static u32 cnak_pending;

static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num)
{
	if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) {
		DBG(ep->dev, "NAK could not be cleared for ep%d\n", num);
		cnak_pending |= 1 << (num);
		ep->naking = 1;
	} else
		cnak_pending = cnak_pending & (~(1 << (num)));
}


/* Enables endpoint, is called by gadget driver */
static int
udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
{
	struct udc_ep *ep;
	struct udc *dev;
	u32 tmp;
	unsigned long iflags;
	u8 udc_csr_epix;
	unsigned maxpacket;

	if (!usbep
			|| usbep->name == ep0_string
			|| !desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	dev = ep->dev;

	DBG(dev, "udc_ep_enable() ep %d\n", ep->num);

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&dev->lock, iflags);
	ep->ep.desc = desc;

	ep->halted = 0;

	/* set traffic type */
	tmp = readl(&dev->ep[ep->num].regs->ctl);
	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET);
	writel(tmp, &dev->ep[ep->num].regs->ctl);

	/* set max packet size */
	maxpacket = usb_endpoint_maxp(desc);
	tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_EP_MAX_PKT_SIZE);
	ep->ep.maxpacket = maxpacket;
	writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);

	/* IN ep */
	if (ep->in) {

		/* ep ix in UDC CSR register space */
		udc_csr_epix = ep->num;

		/* set buffer size (tx fifo entries) */
		tmp = readl(&dev->ep[ep->num].regs->bufin_framenum);
		/* double buffering: fifo size = 2 x max packet size */
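		/*
		 * e.g. (sketch) for a 512-byte high-speed bulk maxpacket,
		 * assuming UDC_EPIN_BUFF_SIZE_MULT == 2 and
		 * UDC_DWORD_BYTES == 4, this programs 512 * 2 / 4 = 256
		 * fifo entries (dwords).
		 */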
		tmp = AMD_ADDBITS(
				tmp,
				maxpacket * UDC_EPIN_BUFF_SIZE_MULT
					/ UDC_DWORD_BYTES,
				UDC_EPIN_BUFF_SIZE);
		writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);

		/* calc. tx fifo base addr */
		udc_set_txfifo_addr(ep);

		/* flush fifo */
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_F);
		writel(tmp, &ep->regs->ctl);

	/* OUT ep */
	} else {
		/* ep ix in UDC CSR register space */
		udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;

		/* set max packet size UDC CSR */
		tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
		tmp = AMD_ADDBITS(tmp, maxpacket,
					UDC_CSR_NE_MAX_PKT);
		writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);

		if (use_dma && !ep->in) {
			/* alloc and init BNA dummy request */
			ep->bna_dummy_req = udc_alloc_bna_dummy(ep);
			ep->bna_occurred = 0;
		}

		if (ep->num != UDC_EP0OUT_IX)
			dev->data_ep_enabled = 1;
	}

	/* set ep values */
	tmp = readl(&dev->csr->ne[udc_csr_epix]);
	/* max packet */
	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_CSR_NE_MAX_PKT);
	/* ep number */
	tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM);
	/* ep direction */
	tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR);
	/* ep type */
	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE);
	/* ep config */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG);
	/* ep interface */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF);
	/* ep alt */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT);
	/* write reg */
	writel(tmp, &dev->csr->ne[udc_csr_epix]);

	/* enable ep irq */
	tmp = readl(&dev->regs->ep_irqmsk);
	tmp &= AMD_UNMASK_BIT(ep->num);
	writel(tmp, &dev->regs->ep_irqmsk);

	/*
	 * clear NAK by writing CNAK
	 * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written
	 */
	if (!use_dma || ep->in) {
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_CNAK);
		writel(tmp, &ep->regs->ctl);
		ep->naking = 0;
		UDC_QUEUE_CNAK(ep, ep->num);
	}
	tmp = desc->bEndpointAddress;
	DBG(dev, "%s enabled\n", usbep->name);

	spin_unlock_irqrestore(&dev->lock, iflags);
	return 0;
}

/* Resets endpoint */
static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
{
	u32 tmp;

	VDBG(ep->dev, "ep-%d reset\n", ep->num);
	ep->ep.desc = NULL;
	ep->ep.ops = &udc_ep_ops;
	INIT_LIST_HEAD(&ep->queue);

	usb_ep_set_maxpacket_limit(&ep->ep, (u16) ~0);
	/* set NAK */
	tmp = readl(&ep->regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_SNAK);
	writel(tmp, &ep->regs->ctl);
	ep->naking = 1;

	/* disable interrupt */
	tmp = readl(&regs->ep_irqmsk);
	tmp |= AMD_BIT(ep->num);
	writel(tmp, &regs->ep_irqmsk);

	if (ep->in) {
		/* unset P and IN bit of potential former DMA */
		tmp = readl(&ep->regs->ctl);
		tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P);
		writel(tmp, &ep->regs->ctl);

		tmp = readl(&ep->regs->sts);
		tmp |= AMD_BIT(UDC_EPSTS_IN);
		writel(tmp, &ep->regs->sts);

		/* flush the fifo */
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_F);
		writel(tmp, &ep->regs->ctl);

	}
	/* reset desc pointer */
	writel(0, &ep->regs->desptr);
}

/* Disables endpoint, is called by gadget driver */
static int udc_ep_disable(struct usb_ep *usbep)
{
	struct udc_ep *ep = NULL;
	unsigned long iflags;

	if (!usbep)
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	if (usbep->name == ep0_string || !ep->ep.desc)
		return -EINVAL;

	DBG(ep->dev, "Disable ep-%d\n", ep->num);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
	empty_req_queue(ep);
	ep_init(ep->dev->regs, ep);
	spin_unlock_irqrestore(&ep->dev->lock, iflags);

	return 0;
}

/* Allocates request packet, called by gadget driver */
static struct usb_request *
udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
{
	struct udc_request *req;
	struct udc_data_dma *dma_desc;
	struct udc_ep *ep;

	if (!usbep)
		return NULL;

	ep = container_of(usbep, struct udc_ep, ep);

	VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
	req = kzalloc(sizeof(struct udc_request), gfp);
	if (!req)
		return NULL;

	req->req.dma = DMA_DONT_USE;
	INIT_LIST_HEAD(&req->queue);

	if (ep->dma) {
		/* ep0 in requests are allocated from data pool here */
		dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
						&req->td_phys);
		if (!dma_desc) {
			kfree(req);
			return NULL;
		}

		VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
				"td_phys = %lx\n",
				req, dma_desc,
				(unsigned long)req->td_phys);
		/* prevent from using desc. - set HOST BUSY */
		dma_desc->status = AMD_ADDBITS(dma_desc->status,
						UDC_DMA_STP_STS_BS_HOST_BUSY,
						UDC_DMA_STP_STS_BS);
		dma_desc->bufptr = cpu_to_le32(DMA_DONT_USE);
		req->td_data = dma_desc;
		req->td_data_last = NULL;
		req->chain_len = 1;
	}

	return &req->req;
}

/* frees pci pool descriptors of a DMA chain */
static int udc_free_dma_chain(struct udc *dev, struct udc_request *req)
{
	int ret_val = 0;
	struct udc_data_dma *td;
	struct udc_data_dma *td_last = NULL;
	unsigned int i;

	DBG(dev, "free chain req = %p\n", req);

	/* do not free first desc., will be done by free for request */
	td_last = req->td_data;
	td = phys_to_virt(td_last->next);

	for (i = 1; i < req->chain_len; i++) {
		pci_pool_free(dev->data_requests, td,
				(dma_addr_t)td_last->next);
		td_last = td;
		td = phys_to_virt(td_last->next);
	}

	return ret_val;
}

/* Frees request packet, called by gadget driver */
static void
udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep *ep;
	struct udc_request *req;

	if (!usbep || !usbreq)
		return;

	ep = container_of(usbep, struct udc_ep, ep);
	req = container_of(usbreq, struct udc_request, req);
	VDBG(ep->dev, "free_req req=%p\n", req);
	BUG_ON(!list_empty(&req->queue));
	if (req->td_data) {
		VDBG(ep->dev, "req->td_data=%p\n", req->td_data);

		/* free dma chain if created */
		if (req->chain_len > 1)
			udc_free_dma_chain(ep->dev, req);

		pci_pool_free(ep->dev->data_requests, req->td_data,
				req->td_phys);
	}
	kfree(req);
}

/* Init BNA dummy descriptor for HOST BUSY and pointing to itself */
static void udc_init_bna_dummy(struct udc_request *req)
{
	if (req) {
		/* set last bit */
		req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* set next pointer to itself */
		req->td_data->next = req->td_phys;
		/* set HOST BUSY */
		req->td_data->status
			= AMD_ADDBITS(req->td_data->status,
					UDC_DMA_STP_STS_BS_DMA_DONE,
					UDC_DMA_STP_STS_BS);
#ifdef UDC_VERBOSE
		pr_debug("bna desc = %p, sts = %08x\n",
			req->td_data, req->td_data->status);
#endif
	}
}

/* Allocate BNA dummy descriptor */
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
{
	struct udc_request *req = NULL;
	struct usb_request *_req = NULL;

	/* alloc the dummy request */
	_req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
	if (_req) {
		req = container_of(_req, struct udc_request, req);
		ep->bna_dummy_req = req;
		udc_init_bna_dummy(req);
	}
	return req;
}

/* Write data to TX fifo for IN packets */
static void
udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
{
	u8 *req_buf;
	u32 *buf;
	int i, j;
	unsigned bytes = 0;
	unsigned remaining = 0;

	if (!req || !ep)
		return;

	req_buf = req->buf + req->actual;
	prefetch(req_buf);
	remaining = req->length - req->actual;

	buf = (u32 *) req_buf;

	bytes = ep->ep.maxpacket;
	if (bytes > remaining)
		bytes = remaining;
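	/*
	 * e.g. bytes == 13 (assuming UDC_DWORD_BYTES == 4): the first loop
	 * below does three 32-bit fifo writes, the second one byte write
	 * for the trailing byte.
	 */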

	/* dwords first */
	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
		writel(*(buf + i), ep->txfifo);

	/* remaining bytes must be written by byte access */
	for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
		writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
			ep->txfifo);
	}

	/* dummy write confirm */
	writel(0, &ep->regs->confirm);
}

/* Read dwords from RX fifo for OUT transfers */
static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
{
	int i;

	VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);

	for (i = 0; i < dwords; i++)
		*(buf + i) = readl(dev->rxfifo);
	return 0;
}

/* Read bytes from RX fifo for OUT transfers */
static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
{
	int i, j;
	u32 tmp;

	VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);

	/* dwords first */
	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
		*((u32 *)(buf + (i << 2))) = readl(dev->rxfifo);

	/* remaining bytes must be read by byte access */
	if (bytes % UDC_DWORD_BYTES) {
		tmp = readl(dev->rxfifo);
		for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
			*(buf + (i << 2) + j) = (u8)(tmp & UDC_BYTE_MASK);
			tmp = tmp >> UDC_BITS_PER_BYTE;
		}
	}

	return 0;
}

/* Read data from RX fifo for OUT transfers */
static int
udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
{
	u8 *buf;
	unsigned buf_space;
	unsigned bytes = 0;
	unsigned finished = 0;

	/* received number bytes */
	bytes = readl(&ep->regs->sts);
	bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE);

	buf_space = req->req.length - req->req.actual;
	buf = req->req.buf + req->req.actual;
	if (bytes > buf_space) {
		if ((buf_space % ep->ep.maxpacket) != 0) {
			DBG(ep->dev,
				"%s: rx %d bytes, rx-buf space = %d bytes\n",
				ep->ep.name, bytes, buf_space);
			req->req.status = -EOVERFLOW;
		}
		bytes = buf_space;
	}
	req->req.actual += bytes;

	/* last packet ? */
	if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
		|| ((req->req.actual == req->req.length) && !req->req.zero))
		finished = 1;

	/* read rx fifo bytes */
	VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
	udc_rxfifo_read_bytes(ep->dev, buf, bytes);

	return finished;
}

/* Creates or re-inits a DMA chain */
static int udc_create_dma_chain(
	struct udc_ep *ep,
	struct udc_request *req,
	unsigned long buf_len, gfp_t gfp_flags
)
{
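	/*
	 * Chain layout (from the code below): the request's first
	 * descriptor (req->td_data) is reused as head; one further
	 * descriptor is linked per additional buf_len chunk
	 * (maxpacket-sized in PPB mode), and the last descriptor gets
	 * the L (last) bit set.
	 */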
	unsigned long bytes = req->req.length;
	unsigned int i;
	dma_addr_t dma_addr;
	struct udc_data_dma *td = NULL;
	struct udc_data_dma *last = NULL;
	unsigned long txbytes;
	unsigned create_new_chain = 0;
	unsigned len;

	VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
			bytes, buf_len);
	dma_addr = DMA_DONT_USE;

	/* unset L bit in first desc for OUT */
	if (!ep->in)
		req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);

	/* alloc only new desc's if not already available */
	len = req->req.length / ep->ep.maxpacket;
	if (req->req.length % ep->ep.maxpacket)
		len++;

	if (len > req->chain_len) {
		/* shorter chain already allocated before */
		if (req->chain_len > 1)
			udc_free_dma_chain(ep->dev, req);
		req->chain_len = len;
		create_new_chain = 1;
	}

	td = req->td_data;
	/* gen. required number of descriptors and buffers */
	for (i = buf_len; i < bytes; i += buf_len) {
		/* create or determine next desc. */
		if (create_new_chain) {
			td = pci_pool_alloc(ep->dev->data_requests,
					gfp_flags, &dma_addr);
			if (!td)
				return -ENOMEM;

			td->status = 0;
		} else if (i == buf_len) {
			/* first td */
			td = (struct udc_data_dma *)phys_to_virt(
						req->td_data->next);
			td->status = 0;
		} else {
			td = (struct udc_data_dma *)phys_to_virt(last->next);
			td->status = 0;
		}

		if (td)
			td->bufptr = req->req.dma + i; /* assign buffer */
		else
			break;

		/* short packet ? */
		if ((bytes - i) >= buf_len) {
			txbytes = buf_len;
		} else {
			/* short packet */
			txbytes = bytes - i;
		}

		/* link td and assign tx bytes */
		if (i == buf_len) {
			if (create_new_chain)
				req->td_data->next = dma_addr;
			/*
			 * else
			 *	req->td_data->next = virt_to_phys(td);
			 */
			/* write tx bytes */
			if (ep->in) {
				/* first desc */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						ep->ep.maxpacket,
						UDC_DMA_IN_STS_TXBYTES);
				/* second desc */
				td->status = AMD_ADDBITS(td->status,
						txbytes,
						UDC_DMA_IN_STS_TXBYTES);
			}
		} else {
			if (create_new_chain)
				last->next = dma_addr;
			/*
			 * else
			 *	last->next = virt_to_phys(td);
			 */
			if (ep->in) {
				/* write tx bytes */
				td->status = AMD_ADDBITS(td->status,
						txbytes,
						UDC_DMA_IN_STS_TXBYTES);
			}
		}
		last = td;
	}
	/* set last bit */
	if (td) {
		td->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* last desc. points to itself */
		req->td_data_last = td;
	}

	return 0;
}

/* create/re-init a DMA descriptor or a DMA descriptor chain */
static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
{
	int retval = 0;
	u32 tmp;

	VDBG(ep->dev, "prep_dma\n");
	VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
			ep->num, req->td_data);

	/* set buffer pointer */
	req->td_data->bufptr = req->req.dma;

	/* set last bit */
	req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);

	/* build/re-init dma chain if maxpkt scatter mode, not for EP0 */
	if (use_dma_ppb) {

		retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
		if (retval != 0) {
			if (retval == -ENOMEM)
				DBG(ep->dev, "Out of DMA memory\n");
			return retval;
		}
		if (ep->in) {
			if (req->req.length == ep->ep.maxpacket) {
				/* write tx bytes */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						ep->ep.maxpacket,
						UDC_DMA_IN_STS_TXBYTES);

			}
		}

	}

	if (ep->in) {
		VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d "
				"maxpacket=%d ep%d\n",
				use_dma_ppb, req->req.length,
				ep->ep.maxpacket, ep->num);
		/*
		 * if bytes < max packet then tx bytes must
		 * be written in packet per buffer mode
		 */
		if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
				|| ep->num == UDC_EP0OUT_IX
				|| ep->num == UDC_EP0IN_IX) {
			/* write tx bytes */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						req->req.length,
						UDC_DMA_IN_STS_TXBYTES);
			/* reset frame num */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						0,
						UDC_DMA_IN_STS_FRAMENUM);
		}
		/* set HOST BUSY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_BUSY,
				UDC_DMA_STP_STS_BS);
	} else {
		VDBG(ep->dev, "OUT set host ready\n");
		/* set HOST READY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_READY,
				UDC_DMA_STP_STS_BS);

		/* clear NAK by writing CNAK */
		if (ep->naking) {
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->naking = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}

	}

	return retval;
}

/* Completes request packet ... caller MUST hold lock */
static void
complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
__releases(ep->dev->lock)
__acquires(ep->dev->lock)
{
	struct udc *dev;
	unsigned halted;

	VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);

	dev = ep->dev;
	/* unmap DMA */
	if (ep->dma)
		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->in);

	halted = ep->halted;
	ep->halted = 1;

	/* set new status if pending */
	if (req->req.status == -EINPROGRESS)
		req->req.status = sts;

	/* remove from ep queue */
	list_del_init(&req->queue);

	VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
		&req->req, req->req.length, ep->ep.name, sts);

	spin_unlock(&dev->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->halted = halted;
}

/* Iterates to the end of a DMA chain and returns last descriptor */
static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
{
	struct udc_data_dma *td;

	td = req->td_data;
	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L)))
		td = phys_to_virt(td->next);

	return td;

}

/* Iterates to the end of a DMA chain and counts bytes received */
static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
{
	struct udc_data_dma *td;
	u32 count;

	td = req->td_data;
	/* received number bytes */
	count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);

	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
		td = phys_to_virt(td->next);
		/* received number bytes */
		if (td) {
			count += AMD_GETBITS(td->status,
				UDC_DMA_OUT_STS_RXBYTES);
		}
	}

	return count;

}

/* Enabling RX DMA */
static void udc_set_rde(struct udc *dev)
{
	u32 tmp;

	VDBG(dev, "udc_set_rde()\n");
	/* stop RDE timer */
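	/*
	 * (mod_timer with jiffies - 1 forces immediate expiry; with
	 * set_rde == 0 the timer callback then exits without re-arming)
	 */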
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* set RDE */
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_RDE);
	writel(tmp, &dev->regs->ctl);
}

/* Queues a request packet, called by gadget driver */
static int
udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
{
	int retval = 0;
	u8 open_rxfifo = 0;
	unsigned long iflags;
	struct udc_ep *ep;
	struct udc_request *req;
	struct udc *dev;
	u32 tmp;

	/* check the inputs */
	req = container_of(usbreq, struct udc_request, req);

	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
			|| !list_empty(&req->queue))
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;

	VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
	dev = ep->dev;

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* map dma (usually done before) */
	if (ep->dma) {
		VDBG(dev, "DMA map req %p\n", req);
		retval = usb_gadget_map_request(&udc->gadget, usbreq, ep->in);
		if (retval)
			return retval;
	}

	VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
			usbep->name, usbreq, usbreq->length,
			req->td_data, usbreq->buf);

	spin_lock_irqsave(&dev->lock, iflags);
	usbreq->actual = 0;
	usbreq->status = -EINPROGRESS;
	req->dma_done = 0;

	/* on empty queue just do first transfer */
	if (list_empty(&ep->queue)) {
		/* zlp */
		if (usbreq->length == 0) {
			/* IN zlp's are handled by hardware */
			complete_req(ep, req, 0);
			VDBG(dev, "%s: zlp\n", ep->ep.name);
			/*
			 * if set_config or set_intf is waiting for ack by zlp
			 * then set CSR_DONE
			 */
			if (dev->set_cfg_not_acked) {
				tmp = readl(&dev->regs->ctl);
				tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
				writel(tmp, &dev->regs->ctl);
				dev->set_cfg_not_acked = 0;
			}
			/* setup command is ACK'ed now by zlp */
			if (dev->waiting_zlp_ack_ep0in) {
				/* clear NAK by writing CNAK in EP0_IN */
				tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
				dev->ep[UDC_EP0IN_IX].naking = 0;
				UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
							UDC_EP0IN_IX);
				dev->waiting_zlp_ack_ep0in = 0;
			}
			goto finished;
		}
		if (ep->dma) {
			retval = prep_dma(ep, req, GFP_ATOMIC);
			if (retval != 0)
				goto finished;
			/* write desc pointer to enable DMA */
			if (ep->in) {
				/* set HOST READY */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						UDC_DMA_IN_STS_BS_HOST_READY,
						UDC_DMA_IN_STS_BS);
			}

			/* disable RX DMA while descriptor update */
			if (!ep->in) {
				/* stop RDE timer */
				if (timer_pending(&udc_timer)) {
					set_rde = 0;
					mod_timer(&udc_timer, jiffies - 1);
				}
				/* clear RDE */
				tmp = readl(&dev->regs->ctl);
				tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
				writel(tmp, &dev->regs->ctl);
				open_rxfifo = 1;

				/*
				 * if BNA occurred then let BNA dummy desc.
				 * point to current desc.
				 */
				if (ep->bna_occurred) {
					VDBG(dev, "copy to BNA dummy desc.\n");
					memcpy(ep->bna_dummy_req->td_data,
						req->td_data,
						sizeof(struct udc_data_dma));
				}
			}
			/* write desc pointer */
			writel(req->td_phys, &ep->regs->desptr);

			/* clear NAK by writing CNAK */
			if (ep->naking) {
				tmp = readl(&ep->regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &ep->regs->ctl);
				ep->naking = 0;
				UDC_QUEUE_CNAK(ep, ep->num);
			}

			if (ep->in) {
				/* enable ep irq */
				tmp = readl(&dev->regs->ep_irqmsk);
				tmp &= AMD_UNMASK_BIT(ep->num);
				writel(tmp, &dev->regs->ep_irqmsk);
			}
		} else if (ep->in) {
			/* enable ep irq */
			tmp = readl(&dev->regs->ep_irqmsk);
			tmp &= AMD_UNMASK_BIT(ep->num);
			writel(tmp, &dev->regs->ep_irqmsk);
		}

	} else if (ep->dma) {

		/*
		 * prep_dma not used for OUT ep's, this is not possible
		 * for PPB modes, because of chain creation reasons
		 */
		if (ep->in) {
			retval = prep_dma(ep, req, GFP_ATOMIC);
			if (retval != 0)
				goto finished;
		}
	}
	VDBG(dev, "list_add\n");
	/* add request to ep queue */
	if (req) {

		list_add_tail(&req->queue, &ep->queue);

		/* open rxfifo if out data queued */
		if (open_rxfifo) {
			/* enable DMA */
			req->dma_going = 1;
			udc_set_rde(dev);
			if (ep->num != UDC_EP0OUT_IX)
				dev->data_ep_queued = 1;
		}
		/* stop OUT naking */
		if (!ep->in) {
			if (!use_dma && udc_rxfifo_pending) {
				DBG(dev, "udc_queue(): pending bytes in "
					"rxfifo after nyet\n");
				/*
				 * read pending bytes after nyet:
				 * referring to isr
				 */
				if (udc_rxfifo_read(ep, req)) {
					/* finish */
					complete_req(ep, req, 0);
				}
				udc_rxfifo_pending = 0;

			}
		}
	}

finished:
	spin_unlock_irqrestore(&dev->lock, iflags);
	return retval;
}

/* Empty request queue of an endpoint; caller holds spinlock */
static void empty_req_queue(struct udc_ep *ep)
{
	struct udc_request *req;

	ep->halted = 1;
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
			struct udc_request,
			queue);
		complete_req(ep, req, -ESHUTDOWN);
	}
}

/* Dequeues a request packet, called by gadget driver */
static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep *ep;
	struct udc_request *req;
	unsigned halted;
	unsigned long iflags;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!usbep || !usbreq || (!ep->ep.desc && (ep->num != 0
				&& ep->num != UDC_EP0OUT_IX)))
		return -EINVAL;

	req = container_of(usbreq, struct udc_request, req);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	halted = ep->halted;
	ep->halted = 1;
	/* request in processing or next one */
	if (ep->queue.next == &req->queue) {
		if (ep->dma && req->dma_going) {
			if (ep->in)
				ep->cancel_transfer = 1;
			else {
				u32 tmp;
				u32 dma_sts;
				/* stop potential receive DMA */
				tmp = readl(&udc->regs->ctl);
				writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
							&udc->regs->ctl);
				/*
				 * Cancel transfer later in ISR
				 * if descriptor was touched.
				 */
				dma_sts = AMD_GETBITS(req->td_data->status,
							UDC_DMA_OUT_STS_BS);
				if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
					ep->cancel_transfer = 1;
				else {
					udc_init_bna_dummy(ep->req);
					writel(ep->bna_dummy_req->td_phys,
						&ep->regs->desptr);
				}
				writel(tmp, &udc->regs->ctl);
			}
		}
	}
	complete_req(ep, req, -ECONNRESET);
	ep->halted = halted;

	spin_unlock_irqrestore(&ep->dev->lock, iflags);
	return 0;
}

/* Halt or clear halt of endpoint */
static int
udc_set_halt(struct usb_ep *usbep, int halt)
{
	struct udc_ep *ep;
	u32 tmp;
	unsigned long iflags;
	int retval = 0;

	if (!usbep)
		return -EINVAL;

	pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc_stall_spinlock, iflags);
	/* halt or clear halt */
	if (halt) {
		if (ep->num == 0)
			ep->dev->stall_ep0in = 1;
		else {
			/*
			 * set STALL
			 * rxfifo empty not taken into account
			 */
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 1;

			/* setup poll timer */
			if (!timer_pending(&udc_pollstall_timer)) {
				udc_pollstall_timer.expires = jiffies +
					HZ * UDC_POLLSTALL_TIMER_USECONDS
					/ (1000 * 1000);
				if (!stop_pollstall_timer) {
					DBG(ep->dev, "start polltimer\n");
					add_timer(&udc_pollstall_timer);
				}
			}
		}
	} else {
		/* ep is halted by set_halt() before */
		if (ep->halted) {
			tmp = readl(&ep->regs->ctl);
			/* clear stall bit */
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			/* clear NAK by writing CNAK */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}
	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
	return retval;
}

/* gadget interface */
static const struct usb_ep_ops udc_ep_ops = {
	.enable		= udc_ep_enable,
	.disable	= udc_ep_disable,

	.alloc_request	= udc_alloc_request,
	.free_request	= udc_free_request,

	.queue		= udc_queue,
	.dequeue	= udc_dequeue,

	.set_halt	= udc_set_halt,
	/* fifo ops not implemented */
};
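
/*
 * Typical gadget-driver-side flow (sketch): usb_ep_enable() dispatches
 * through these ops to udc_ep_enable() above, usb_ep_queue() to
 * udc_queue(), and completion is signalled back via req->complete()
 * from complete_req().
 */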

/*-------------------------------------------------------------------------*/

/* Get frame counter (not implemented) */
static int udc_get_frame(struct usb_gadget *gadget)
{
	return -EOPNOTSUPP;
}

/* Initiates a remote wakeup */
static int udc_remote_wakeup(struct udc *dev)
{
	unsigned long flags;
	u32 tmp;

	DBG(dev, "UDC initiates remote wakeup\n");

	spin_lock_irqsave(&dev->lock, flags);

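	/* pulse RES: set, then clear, the resume bit to signal wakeup */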
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_RES);
	writel(tmp, &dev->regs->ctl);
	tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
	writel(tmp, &dev->regs->ctl);

	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}

/* Remote wakeup gadget interface */
static int udc_wakeup(struct usb_gadget *gadget)
{
	struct udc *dev;

	if (!gadget)
		return -EINVAL;
	dev = container_of(gadget, struct udc, gadget);
	udc_remote_wakeup(dev);

	return 0;
}

static int amd5536_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
static int amd5536_udc_stop(struct usb_gadget *g);

static const struct usb_gadget_ops udc_ops = {
	.wakeup		= udc_wakeup,
	.get_frame	= udc_get_frame,
	.udc_start	= amd5536_udc_start,
	.udc_stop	= amd5536_udc_stop,
};

/* Setups endpoint parameters, adds endpoints to linked list */
static void make_ep_lists(struct udc *dev)
{
	/* make gadget ep lists */
	INIT_LIST_HEAD(&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
			&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
			&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
			&dev->gadget.ep_list);

	/* fifo config */
	dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
	if (dev->gadget.speed == USB_SPEED_FULL)
		dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
	dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
}

/* init registers at driver load time */
static int startup_registers(struct udc *dev)
{
	u32 tmp;

	/* init controller by soft reset */
	udc_soft_reset(dev);

	/* mask not needed interrupts */
	udc_mask_unused_interrupts(dev);

	/* put into initial config */
	udc_basic_init(dev);
	/* link up all endpoints */
	udc_setup_endpoints(dev);

	/* program speed */
	tmp = readl(&dev->regs->cfg);
	if (use_fullspeed)
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
	else
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
	writel(tmp, &dev->regs->cfg);

	return 0;
}

/* Inits UDC context */
static void udc_basic_init(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "udc_basic_init()\n");

	dev->gadget.speed = USB_SPEED_UNKNOWN;

	/* stop RDE timer */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* stop poll stall timer */
	if (timer_pending(&udc_pollstall_timer))
		mod_timer(&udc_pollstall_timer, jiffies - 1);
	/* disable DMA */
	tmp = readl(&dev->regs->ctl);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE);
	writel(tmp, &dev->regs->ctl);

	/* enable dynamic CSR programming */
	tmp = readl(&dev->regs->cfg);
	tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG);
	/* set self powered */
	tmp |= AMD_BIT(UDC_DEVCFG_SP);
	/* set remote wakeupable */
	tmp |= AMD_BIT(UDC_DEVCFG_RWKP);
	writel(tmp, &dev->regs->cfg);

	make_ep_lists(dev);

	dev->data_ep_enabled = 0;
	dev->data_ep_queued = 0;
}

/* Sets initial endpoint parameters */
static void udc_setup_endpoints(struct udc *dev)
{
	struct udc_ep *ep;
	u32 tmp;
	u32 reg;

	DBG(dev, "udc_setup_endpoints()\n");

	/* read enum speed */
	tmp = readl(&dev->regs->sts);
	tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
	if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH)
		dev->gadget.speed = USB_SPEED_HIGH;
	else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL)
		dev->gadget.speed = USB_SPEED_FULL;

	/* set basic ep parameters */
	for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
		ep = &dev->ep[tmp];
		ep->dev = dev;
		ep->ep.name = ep_info[tmp].name;
		ep->ep.caps = ep_info[tmp].caps;
		ep->num = tmp;
		/* txfifo size is calculated at enable time */
		ep->txfifo = dev->txfifo;

		/* fifo size */
		if (tmp < UDC_EPIN_NUM) {
			ep->fifo_depth = UDC_TXFIFO_SIZE;
			ep->in = 1;
		} else {
			ep->fifo_depth = UDC_RXFIFO_SIZE;
			ep->in = 0;

		}
		ep->regs = &dev->ep_regs[tmp];
		/*
		 * ep will be reset only if ep was not enabled before to avoid
		 * disabling ep interrupts when ENUM interrupt occurs but ep is
		 * not enabled by gadget driver
		 */
		if (!ep->ep.desc)
			ep_init(dev->regs, ep);

		if (use_dma) {
			/*
			 * ep->dma is not really used, just to indicate that
			 * DMA is active: remove this
			 * dma regs = dev control regs
			 */
			ep->dma = &dev->regs->ctl;

			/* nak OUT endpoints until enable - not for ep0 */
			if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX
					&& tmp > UDC_EPIN_NUM) {
				/* set NAK */
				reg = readl(&dev->ep[tmp].regs->ctl);
				reg |= AMD_BIT(UDC_EPCTL_SNAK);
				writel(reg, &dev->ep[tmp].regs->ctl);
				dev->ep[tmp].naking = 1;

			}
		}
	}
	/* EP0 max packet */
	if (dev->gadget.speed == USB_SPEED_FULL) {
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
					   UDC_FS_EP0IN_MAX_PKT_SIZE);
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
					   UDC_FS_EP0OUT_MAX_PKT_SIZE);
	} else if (dev->gadget.speed == USB_SPEED_HIGH) {
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
					   UDC_EP0IN_MAX_PKT_SIZE);
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
					   UDC_EP0OUT_MAX_PKT_SIZE);
	}

	/*
	 * with suspend bug workaround, ep0 params for gadget driver
	 * are set at gadget driver bind() call
	 */
	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
	dev->ep[UDC_EP0IN_IX].halted = 0;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);

	/* init cfg/alt/int */
	dev->cur_config = 0;
	dev->cur_intf = 0;
	dev->cur_alt = 0;
}

/* Bringup after Connect event, initial bringup to be ready for ep0 events */
static void usb_connect(struct udc *dev)
{

	dev_info(&dev->pdev->dev, "USB Connect\n");

	dev->connected = 1;

	/* put into initial config */
	udc_basic_init(dev);

	/* enable device setup interrupts */
	udc_enable_dev_setup_interrupts(dev);
}

/*
 * Calls gadget with disconnect event and resets the UDC and makes
 * initial bringup to be ready for ep0 events
 */
static void usb_disconnect(struct udc *dev)
{

	dev_info(&dev->pdev->dev, "USB Disconnect\n");

	dev->connected = 0;

	/* mask interrupts */
	udc_mask_unused_interrupts(dev);

	/* REVISIT there doesn't seem to be a point to having this
	 * talk to a tasklet ... do it directly, we already hold
	 * the spinlock needed to process the disconnect.
	 */

	tasklet_schedule(&disconnect_tasklet);
}

/* Tasklet for disconnect to be outside of interrupt context */
static void udc_tasklet_disconnect(unsigned long par)
{
	struct udc *dev = (struct udc *)(*((struct udc **) par));
	u32 tmp;

	DBG(dev, "Tasklet disconnect\n");
	spin_lock_irq(&dev->lock);

	if (dev->driver) {
		spin_unlock(&dev->lock);
		dev->driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);

		/* empty queues */
		for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
			empty_req_queue(&dev->ep[tmp]);

	}

	/* disable ep0 */
	ep_init(dev->regs,
			&dev->ep[UDC_EP0IN_IX]);

	if (!soft_reset_occurred) {
		/* init controller by soft reset */
		udc_soft_reset(dev);
		soft_reset_occurred++;
	}

	/* re-enable dev interrupts */
	udc_enable_dev_setup_interrupts(dev);
	/* back to full speed ? */
	if (use_fullspeed) {
		tmp = readl(&dev->regs->cfg);
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
		writel(tmp, &dev->regs->cfg);
	}

	spin_unlock_irq(&dev->lock);
}

/* Reset the UDC core */
static void udc_soft_reset(struct udc *dev)
{
	unsigned long flags;

	DBG(dev, "Soft reset\n");
	/*
	 * reset possible waiting interrupts, because int.
	 * status is lost after soft reset,
	 * ep int. status reset
	 */
	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts);
	/* device int. status reset */
	writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts);

	spin_lock_irqsave(&udc_irq_spinlock, flags);
	writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
	readl(&dev->regs->cfg);
	spin_unlock_irqrestore(&udc_irq_spinlock, flags);

}

/* RDE timer callback to set RDE bit */
static void udc_timer_function(unsigned long v)
{
	u32 tmp;

	spin_lock_irq(&udc_irq_spinlock);

	if (set_rde > 0) {
		/*
		 * open the fifo if fifo was filled on last timer call
		 * conditionally
		 */
		if (set_rde > 1) {
			/* set RDE to receive setup data */
			tmp = readl(&udc->regs->ctl);
			tmp |= AMD_BIT(UDC_DEVCTL_RDE);
			writel(tmp, &udc->regs->ctl);
			set_rde = -1;
		} else if (readl(&udc->regs->sts)
				& AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
			/*
			 * if fifo empty setup polling, do not just
			 * open the fifo
			 */
			udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV;
			if (!stop_timer)
				add_timer(&udc_timer);
		} else {
			/*
			 * fifo contains data now, setup timer for opening
			 * the fifo when timer expires to be able to receive
			 * setup packets; when data packets get queued by
			 * the gadget layer then the timer will be forced to
			 * expire with set_rde=0 (RDE is set in udc_queue())
			 */
			set_rde++;
			/* debug: lhadmot_timer_start = 221070 */
			udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS;
			if (!stop_timer)
				add_timer(&udc_timer);
		}

	} else
		set_rde = -1; /* RDE was set by udc_queue() */
	spin_unlock_irq(&udc_irq_spinlock);
	if (stop_timer)
		complete(&on_exit);

}

/* Handle halt state, used in stall poll timer */
static void udc_handle_halt_state(struct udc_ep *ep)
{
	u32 tmp;
	/* keep handling stall as long as ep is halted */
	if (ep->halted == 1) {
		tmp = readl(&ep->regs->ctl);
		/* STALL cleared ? */
		if (!(tmp & AMD_BIT(UDC_EPCTL_S))) {
			/*
			 * FIXME: MSC spec requires that stall remains
			 * even on receiving of CLEAR_FEATURE HALT. So
			 * we would set STALL again here to be compliant.
			 * But with current mass storage drivers this does
			 * not work (would produce endless host retries).
			 * So we clear halt on CLEAR_FEATURE.
			 *
			DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);*/

			/* clear NAK by writing CNAK */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}
}

/* Stall timer callback to poll S bit and set it again after */
static void udc_pollstall_timer_function(unsigned long v)
{
	struct udc_ep *ep;
	int halted = 0;

	spin_lock_irq(&udc_stall_spinlock);
	/*
	 * only one IN and OUT endpoints are handled
	 * IN poll stall
	 */
	ep = &udc->ep[UDC_EPIN_IX];
	udc_handle_halt_state(ep);
	if (ep->halted)
		halted = 1;
	/* OUT poll stall */
	ep = &udc->ep[UDC_EPOUT_IX];
	udc_handle_halt_state(ep);
	if (ep->halted)
		halted = 1;

	/* setup timer again when still halted */
	if (!stop_pollstall_timer && halted) {
		udc_pollstall_timer.expires = jiffies +
					HZ * UDC_POLLSTALL_TIMER_USECONDS
					/ (1000 * 1000);
		add_timer(&udc_pollstall_timer);
	}
	spin_unlock_irq(&udc_stall_spinlock);

	if (stop_pollstall_timer)
		complete(&on_pollstall_exit);
}

/* Inits endpoint 0 so that SETUP packets are processed */
static void activate_control_endpoints(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "activate_control_endpoints\n");

	/* flush fifo */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_F);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);

	/* set ep0 directions */
	dev->ep[UDC_EP0IN_IX].in = 1;
	dev->ep[UDC_EP0OUT_IX].in = 0;

	/* set buffer size (tx fifo entries) of EP0_IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE,
				UDC_EPIN_BUFF_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE,
				UDC_EPIN_BUFF_SIZE);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);

	/* set max packet size of EP0_IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE,
				UDC_EP_MAX_PKT_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE,
				UDC_EP_MAX_PKT_SIZE);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);

	/* set max packet size of EP0_OUT */
	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
				UDC_EP_MAX_PKT_SIZE);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
				UDC_EP_MAX_PKT_SIZE);
	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);

	/* set max packet size of EP0 in UDC CSR */
	tmp = readl(&dev->csr->ne[0]);
	if (dev->gadget.speed == USB_SPEED_FULL)
		tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
				UDC_CSR_NE_MAX_PKT);
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
				UDC_CSR_NE_MAX_PKT);
	writel(tmp, &dev->csr->ne[0]);

	if (use_dma) {
		dev->ep[UDC_EP0OUT_IX].td->status |=
			AMD_BIT(UDC_DMA_OUT_STS_L);
		/* write dma desc address */
		writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
			&dev->ep[UDC_EP0OUT_IX].regs->subptr);
		writel(dev->ep[UDC_EP0OUT_IX].td_phys,
			&dev->ep[UDC_EP0OUT_IX].regs->desptr);
		/* stop RDE timer */
		if (timer_pending(&udc_timer)) {
			set_rde = 0;
			mod_timer(&udc_timer, jiffies - 1);
		}
		/* stop pollstall timer */
		if (timer_pending(&udc_pollstall_timer))
			mod_timer(&udc_pollstall_timer, jiffies - 1);
		/* enable DMA */
		tmp = readl(&dev->regs->ctl);
		tmp |= AMD_BIT(UDC_DEVCTL_MODE)
				| AMD_BIT(UDC_DEVCTL_RDE)
				| AMD_BIT(UDC_DEVCTL_TDE);
		if (use_dma_bufferfill_mode)
			tmp |= AMD_BIT(UDC_DEVCTL_BF);
		else if (use_dma_ppb_du)
			tmp |= AMD_BIT(UDC_DEVCTL_DU);
		writel(tmp, &dev->regs->ctl);
	}

	/* clear NAK by writing CNAK for EP0IN */
	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
	writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
	dev->ep[UDC_EP0IN_IX].naking = 0;
	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);

	/* clear NAK by writing CNAK for EP0OUT */
	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_CNAK);
	writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
	dev->ep[UDC_EP0OUT_IX].naking = 0;
	UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
}
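/*
 * All register updates in this function follow one read-modify-write
 * pattern: AMD_ADDBITS() replaces only the named bit field and keeps
 * the remaining bits intact. An illustrative expansion (not driver
 * code; a 4-bit field at bits 7..4 is assumed for the example):
 *
 *	u32 reg = readl(addr);
 *	reg = (reg & ~0xf0) | (val << 4);	// what AMD_ADDBITS boils down to
 *	writel(reg, addr);
 */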

/* Make endpoint 0 ready for control traffic */
static int setup_ep0(struct udc *dev)
{
	activate_control_endpoints(dev);
	/* enable ep0 interrupts */
	udc_enable_ep0_interrupts(dev);
	/* enable device setup interrupts */
	udc_enable_dev_setup_interrupts(dev);

	return 0;
}

/* Called by the UDC core when a gadget driver registers itself */
static int amd5536_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver)
{
	struct udc *dev = to_amd5536_udc(g);
	u32 tmp;

	driver->driver.bus = NULL;
	dev->driver = driver;

	/* Some gadget drivers use both ep0 directions.
	 * NOTE: to gadget driver, ep0 is just one endpoint...
	 */
	dev->ep[UDC_EP0OUT_IX].ep.driver_data =
		dev->ep[UDC_EP0IN_IX].ep.driver_data;

	/* get ready for ep0 traffic */
	setup_ep0(dev);

	/* clear SD */
	tmp = readl(&dev->regs->ctl);
	tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD);
	writel(tmp, &dev->regs->ctl);

	usb_connect(dev);

	return 0;
}
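/*
 * Note on the SD (soft disconnect) bit used above: clearing it lets the
 * PHY present the device on the bus; amd5536_udc_stop() below sets it
 * again, so the host sees a detach without the cable being pulled.
 */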

/* shutdown requests and disconnect from gadget */
static void
shutdown(struct udc *dev, struct usb_gadget_driver *driver)
__releases(dev->lock)
__acquires(dev->lock)
{
	int tmp;

	/* empty queues and init hardware */
	udc_basic_init(dev);

	for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
		empty_req_queue(&dev->ep[tmp]);

	udc_setup_endpoints(dev);
}

/* Called by the UDC core when a gadget driver unregisters itself */
static int amd5536_udc_stop(struct usb_gadget *g)
{
	struct udc *dev = to_amd5536_udc(g);
	unsigned long flags;
	u32 tmp;

	spin_lock_irqsave(&dev->lock, flags);
	udc_mask_unused_interrupts(dev);
	shutdown(dev, NULL);
	spin_unlock_irqrestore(&dev->lock, flags);

	dev->driver = NULL;

	/* set SD */
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_SD);
	writel(tmp, &dev->regs->ctl);

	return 0;
}

/* Clear pending NAK bits */
static void udc_process_cnak_queue(struct udc *dev)
{
	u32 tmp;
	u32 reg;

	/* check IN endpoints */
	DBG(dev, "CNAK pending queue processing\n");
	for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) {
		if (cnak_pending & (1 << tmp)) {
			DBG(dev, "CNAK pending for ep%d\n", tmp);
			/* clear NAK by writing CNAK */
			reg = readl(&dev->ep[tmp].regs->ctl);
			reg |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(reg, &dev->ep[tmp].regs->ctl);
			dev->ep[tmp].naking = 0;
			UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
		}
	}
	/* ... and ep0out */
	if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
		DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
		/* clear NAK by writing CNAK */
		reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
		reg |= AMD_BIT(UDC_EPCTL_CNAK);
		writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
		dev->ep[UDC_EP0OUT_IX].naking = 0;
		UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
				dev->ep[UDC_EP0OUT_IX].num);
	}
}
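/*
 * cnak_pending is used here as a bitmask with one bit per endpoint
 * index: a set bit records that a CNAK write is still owed because the
 * NAK could not be cleared immediately. Sketch of the convention,
 * assuming UDC_QUEUE_CNAK() (from amd5536udc.h) maintains the mask:
 *
 *	cnak_pending |= 1 << ep->num;		// queue ep for a later CNAK
 *	...
 *	if (cnak_pending & (1 << ep->num))	// replayed by this function
 */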

/* Enabling RX DMA after setup packet */
static void udc_ep0_set_rde(struct udc *dev)
{
	if (use_dma) {
		/*
		 * only enable RXDMA when no data endpoint enabled
		 * or data is queued
		 */
		if (!dev->data_ep_enabled || dev->data_ep_queued) {
			udc_set_rde(dev);
		} else {
			/*
			 * setup timer for enabling RDE (to not enable
			 * RXFIFO DMA for data endpoints too early)
			 */
			if (set_rde != 0 && !timer_pending(&udc_timer)) {
				udc_timer.expires =
					jiffies + HZ/UDC_RDE_TIMER_DIV;
				set_rde = 1;
				if (!stop_timer)
					add_timer(&udc_timer);
			}
		}
	}
}


/* Interrupt handler for data OUT traffic */
static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	struct udc_ep *ep;
	struct udc_request *req;
	unsigned int count;
	struct udc_data_dma *td = NULL;
	unsigned dma_done;

	VDBG(dev, "ep%d irq\n", ep_ix);
	ep = &dev->ep[ep_ix];

	tmp = readl(&ep->regs->sts);
	if (use_dma) {
		/* BNA event ? */
		if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
			DBG(dev, "BNA ep%dout occurred - DESPTR = %x\n",
					ep->num, readl(&ep->regs->desptr));
			/* clear BNA */
			writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
			if (!ep->cancel_transfer)
				ep->bna_occurred = 1;
			else
				ep->cancel_transfer = 0;
			ret_val = IRQ_HANDLED;
			goto finished;
		}
	}
	/* HE event ? */
	if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
		dev_err(&dev->pdev->dev, "HE ep%dout occurred\n", ep->num);

		/* clear HE */
		writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
		ret_val = IRQ_HANDLED;
		goto finished;
	}

	if (!list_empty(&ep->queue)) {
		/* next request */
		req = list_entry(ep->queue.next,
			struct udc_request, queue);
	} else {
		req = NULL;
		udc_rxfifo_pending = 1;
	}
	VDBG(dev, "req = %p\n", req);
	/* fifo mode */
	if (!use_dma) {
		/* read fifo */
		if (req && udc_rxfifo_read(ep, req)) {
			ret_val = IRQ_HANDLED;

			/* finish */
			complete_req(ep, req, 0);
			/* next request */
			if (!list_empty(&ep->queue) && !ep->halted) {
				req = list_entry(ep->queue.next,
					struct udc_request, queue);
			} else
				req = NULL;
		}

	/* DMA */
	} else if (!ep->cancel_transfer && req != NULL) {
		ret_val = IRQ_HANDLED;

		/* check for DMA done */
		if (!use_dma_ppb) {
			dma_done = AMD_GETBITS(req->td_data->status,
						UDC_DMA_OUT_STS_BS);
		/* packet per buffer mode - rx bytes */
		} else {
			/*
			 * if BNA occurred then recover desc. from
			 * BNA dummy desc.
			 */
			if (ep->bna_occurred) {
				VDBG(dev, "Recover desc. from BNA dummy\n");
				memcpy(req->td_data, ep->bna_dummy_req->td_data,
						sizeof(struct udc_data_dma));
				ep->bna_occurred = 0;
				udc_init_bna_dummy(ep->req);
			}
			td = udc_get_last_dma_desc(req);
			dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
		}
		if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
			/* buffer fill mode - rx bytes */
			if (!use_dma_ppb) {
				/* number of received bytes */
				count = AMD_GETBITS(req->td_data->status,
						UDC_DMA_OUT_STS_RXBYTES);
				VDBG(dev, "rx bytes=%u\n", count);
			/* packet per buffer mode - rx bytes */
			} else {
				VDBG(dev, "req->td_data=%p\n", req->td_data);
				VDBG(dev, "last desc = %p\n", td);
				/* number of received bytes */
				if (use_dma_ppb_du) {
					/* every desc. counts bytes */
					count = udc_get_ppbdu_rxbytes(req);
				} else {
					/* last desc. counts bytes */
					count = AMD_GETBITS(td->status,
						UDC_DMA_OUT_STS_RXBYTES);
					if (!count && req->req.length
						== UDC_DMA_MAXPACKET) {
						/*
						 * on 64k packets the RXBYTES
						 * field is zero
						 */
						count = UDC_DMA_MAXPACKET;
					}
				}
				VDBG(dev, "last desc rx bytes=%u\n", count);
			}

			tmp = req->req.length - req->req.actual;
			if (count > tmp) {
				if ((tmp % ep->ep.maxpacket) != 0) {
					DBG(dev, "%s: rx %db, space=%db\n",
						ep->ep.name, count, tmp);
					req->req.status = -EOVERFLOW;
				}
				count = tmp;
			}
			req->req.actual += count;
			req->dma_going = 0;
			/* complete request */
			complete_req(ep, req, 0);

			/* next request */
			if (!list_empty(&ep->queue) && !ep->halted) {
				req = list_entry(ep->queue.next,
					struct udc_request,
					queue);
				/*
				 * DMA may already be started by udc_queue()
				 * called by the gadget driver's completion
				 * routine. This happens when the queue
				 * holds one request only.
				 */
				if (req->dma_going == 0) {
					/* next dma */
					if (prep_dma(ep, req, GFP_ATOMIC) != 0)
						goto finished;
					/* write desc pointer */
					writel(req->td_phys,
						&ep->regs->desptr);
					req->dma_going = 1;
					/* enable DMA */
					udc_set_rde(dev);
				}
			} else {
				/*
				 * implant BNA dummy descriptor to allow
				 * RXFIFO opening by RDE
				 */
				if (ep->bna_dummy_req) {
					/* write desc pointer */
					writel(ep->bna_dummy_req->td_phys,
						&ep->regs->desptr);
					ep->bna_occurred = 0;
				}

				/*
				 * schedule timer for setting RDE if queue
				 * remains empty to allow ep0 packets pass
				 * through
				 */
				if (set_rde != 0
						&& !timer_pending(&udc_timer)) {
					udc_timer.expires =
						jiffies
						+ HZ*UDC_RDE_TIMER_SECONDS;
					set_rde = 1;
					if (!stop_timer)
						add_timer(&udc_timer);
				}
				if (ep->num != UDC_EP0OUT_IX)
					dev->data_ep_queued = 0;
			}

		} else {
			/*
			 * RX DMA must be reenabled for each desc in PPBDU mode
			 * and must be enabled for PPBNDU mode in case of BNA
			 */
			udc_set_rde(dev);
		}

	} else if (ep->cancel_transfer) {
		ret_val = IRQ_HANDLED;
		ep->cancel_transfer = 0;
	}

	/* check pending CNAKS */
	if (cnak_pending) {
		/* CNAK processing only when rxfifo is empty */
		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
			udc_process_cnak_queue(dev);
	}

	/* clear OUT bits in ep status */
	writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);
finished:
	return ret_val;
}
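/*
 * Summary of the OUT DMA modes the ISR above distinguishes (derived
 * from the module parameters used in this file, not a datasheet quote):
 *
 *  - buffer fill mode (use_dma_bufferfill_mode): one descriptor covers
 *    the whole buffer and its RXBYTES field counts the data;
 *  - packet-per-buffer without descriptor update (use_dma_ppb and not
 *    use_dma_ppb_du): the last descriptor of the chain carries the count;
 *  - packet-per-buffer with descriptor update (use_dma_ppb_du): every
 *    descriptor counts its own bytes, summed via udc_get_ppbdu_rxbytes().
 */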

/* Interrupt handler for data IN traffic */
static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	u32 epsts;
	struct udc_ep *ep;
	struct udc_request *req;
	struct udc_data_dma *td;
	unsigned dma_done;
	unsigned len;

	ep = &dev->ep[ep_ix];

	epsts = readl(&ep->regs->sts);
	if (use_dma) {
		/* BNA ? */
		if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
			dev_err(&dev->pdev->dev,
				"BNA ep%din occurred - DESPTR = %08lx\n",
				ep->num,
				(unsigned long) readl(&ep->regs->desptr));

			/* clear BNA */
			writel(epsts, &ep->regs->sts);
			ret_val = IRQ_HANDLED;
			goto finished;
		}
	}
	/* HE event ? */
	if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
		dev_err(&dev->pdev->dev,
			"HE ep%din occurred - DESPTR = %08lx\n",
			ep->num, (unsigned long) readl(&ep->regs->desptr));

		/* clear HE */
		writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
		ret_val = IRQ_HANDLED;
		goto finished;
	}

	/* DMA completion */
	if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
		VDBG(dev, "TDC set - completion\n");
		ret_val = IRQ_HANDLED;
		if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
			req = list_entry(ep->queue.next,
					struct udc_request, queue);
			/*
			 * length bytes transferred;
			 * check dma done of last desc. in PPBDU mode
			 */
			if (use_dma_ppb_du) {
				td = udc_get_last_dma_desc(req);
				if (td) {
					dma_done =
						AMD_GETBITS(td->status,
						UDC_DMA_IN_STS_BS);
					/* don't care DMA done */
					req->req.actual = req->req.length;
				}
			} else {
				/* assume all bytes transferred */
				req->req.actual = req->req.length;
			}

			if (req->req.actual == req->req.length) {
				/* complete req */
				complete_req(ep, req, 0);
				req->dma_going = 0;
				/* further request available ? */
				if (list_empty(&ep->queue)) {
					/* disable interrupt */
					tmp = readl(&dev->regs->ep_irqmsk);
					tmp |= AMD_BIT(ep->num);
					writel(tmp, &dev->regs->ep_irqmsk);
				}
			}
		}
		ep->cancel_transfer = 0;

	}
	/*
	 * status reg has IN bit set and TDC not set (if TDC was handled,
	 * IN must not be handled (UDC defect) ?)
	 */
	if ((epsts & AMD_BIT(UDC_EPSTS_IN))
			&& !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
		ret_val = IRQ_HANDLED;
		if (!list_empty(&ep->queue)) {
			/* next request */
			req = list_entry(ep->queue.next,
					struct udc_request, queue);
			/* FIFO mode */
			if (!use_dma) {
				/* write fifo */
				udc_txfifo_write(ep, &req->req);
				len = req->req.length - req->req.actual;
				if (len > ep->ep.maxpacket)
					len = ep->ep.maxpacket;
				req->req.actual += len;
				if (req->req.actual == req->req.length
					|| (len != ep->ep.maxpacket)) {
					/* complete req */
					complete_req(ep, req, 0);
				}
			/* DMA */
			} else if (req && !req->dma_going) {
				VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
					req, req->td_data);
				if (req->td_data) {
					req->dma_going = 1;

					/*
					 * unset L bit of first desc.
					 * for chain
					 */
					if (use_dma_ppb && req->req.length >
							ep->ep.maxpacket) {
						req->td_data->status &=
							AMD_CLEAR_BIT(
							UDC_DMA_IN_STS_L);
					}

					/* write desc pointer */
					writel(req->td_phys, &ep->regs->desptr);

					/* set HOST READY */
					req->td_data->status =
						AMD_ADDBITS(
						req->td_data->status,
						UDC_DMA_IN_STS_BS_HOST_READY,
						UDC_DMA_IN_STS_BS);

					/* set poll demand bit */
					tmp = readl(&ep->regs->ctl);
					tmp |= AMD_BIT(UDC_EPCTL_P);
					writel(tmp, &ep->regs->ctl);
				}
			}

		} else if (!use_dma && ep->in) {
			/* disable interrupt */
			tmp = readl(&dev->regs->ep_irqmsk);
			tmp |= AMD_BIT(ep->num);
			writel(tmp, &dev->regs->ep_irqmsk);
		}
	}
	/* clear status bits */
	writel(epsts, &ep->regs->sts);

finished:
	return ret_val;

}

/* Interrupt handler for Control OUT traffic */
static irqreturn_t udc_control_out_isr(struct udc *dev)
__releases(dev->lock)
__acquires(dev->lock)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	int setup_supported;
	u32 count;
	int set = 0;
	struct udc_ep *ep;
	struct udc_ep *ep_tmp;

	ep = &dev->ep[UDC_EP0OUT_IX];

	/* clear irq */
	writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);

	tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
	/* check BNA and clear if set */
	if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
		VDBG(dev, "ep0: BNA set\n");
		writel(AMD_BIT(UDC_EPSTS_BNA),
			&dev->ep[UDC_EP0OUT_IX].regs->sts);
		ep->bna_occurred = 1;
		ret_val = IRQ_HANDLED;
		goto finished;
	}

	/* type of data: SETUP or DATA 0 bytes */
	tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
	VDBG(dev, "data_typ = %x\n", tmp);

	/* setup data */
	if (tmp == UDC_EPSTS_OUT_SETUP) {
		ret_val = IRQ_HANDLED;

		ep->dev->stall_ep0in = 0;
		dev->waiting_zlp_ack_ep0in = 0;

		/* set NAK for EP0_IN */
		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_SNAK);
		writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
		dev->ep[UDC_EP0IN_IX].naking = 1;
		/* get setup data */
		if (use_dma) {
			/* clear OUT bits in ep status */
			writel(UDC_EPSTS_OUT_CLEAR,
				&dev->ep[UDC_EP0OUT_IX].regs->sts);

			setup_data.data[0] =
				dev->ep[UDC_EP0OUT_IX].td_stp->data12;
			setup_data.data[1] =
				dev->ep[UDC_EP0OUT_IX].td_stp->data34;
			/* set HOST READY */
			dev->ep[UDC_EP0OUT_IX].td_stp->status =
				UDC_DMA_STP_STS_BS_HOST_READY;
		} else {
			/* read fifo */
			udc_rxfifo_read_dwords(dev, setup_data.data, 2);
		}

		/* determine direction of control data */
		if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
			dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
			/* enable RDE */
			udc_ep0_set_rde(dev);
			set = 0;
		} else {
			dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
			/*
			 * implant BNA dummy descriptor to allow RXFIFO opening
			 * by RDE
			 */
			if (ep->bna_dummy_req) {
				/* write desc pointer */
				writel(ep->bna_dummy_req->td_phys,
					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
				ep->bna_occurred = 0;
			}

			set = 1;
			dev->ep[UDC_EP0OUT_IX].naking = 1;
			/*
			 * setup timer for enabling RDE (to not enable
			 * RXFIFO DMA for data too early)
			 */
			set_rde = 1;
			if (!timer_pending(&udc_timer)) {
				udc_timer.expires = jiffies +
							HZ/UDC_RDE_TIMER_DIV;
				if (!stop_timer)
					add_timer(&udc_timer);
			}
		}

		/*
		 * mass storage reset must be processed here because
		 * next packet may be a CLEAR_FEATURE HALT which would not
		 * clear the stall bit when no STALL handshake was received
		 * before (autostall can cause this)
		 */
		if (setup_data.data[0] == UDC_MSCRES_DWORD0
				&& setup_data.data[1] == UDC_MSCRES_DWORD1) {
			DBG(dev, "MSC Reset\n");
			/*
			 * clear stall bits
			 * only one IN and one OUT endpoint are handled
			 */
			ep_tmp = &udc->ep[UDC_EPIN_IX];
			udc_set_halt(&ep_tmp->ep, 0);
			ep_tmp = &udc->ep[UDC_EPOUT_IX];
			udc_set_halt(&ep_tmp->ep, 0);
		}

		/* call gadget with setup data received */
		spin_unlock(&dev->lock);
		setup_supported = dev->driver->setup(&dev->gadget,
						&setup_data.request);
		spin_lock(&dev->lock);

		tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
		/* ep0 in returns data (not zlp) on IN phase */
		if (setup_supported >= 0 && setup_supported <
				UDC_EP0IN_MAXPACKET) {
			/* clear NAK by writing CNAK in EP0_IN */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
			dev->ep[UDC_EP0IN_IX].naking = 0;
			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);

		/* if unsupported request then stall */
		} else if (setup_supported < 0) {
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
		} else
			dev->waiting_zlp_ack_ep0in = 1;


		/* clear NAK by writing CNAK in EP0_OUT */
		if (!set) {
			tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
			dev->ep[UDC_EP0OUT_IX].naking = 0;
			UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
		}

		if (!use_dma) {
			/* clear OUT bits in ep status */
			writel(UDC_EPSTS_OUT_CLEAR,
				&dev->ep[UDC_EP0OUT_IX].regs->sts);
		}

	/* data packet 0 bytes */
	} else if (tmp == UDC_EPSTS_OUT_DATA) {
		/* clear OUT bits in ep status */
		writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);

		/* get setup data: only 0 packet */
		if (use_dma) {
			/* no req if 0 packet, just reactivate */
			if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
				VDBG(dev, "ZLP\n");

				/* set HOST READY */
				dev->ep[UDC_EP0OUT_IX].td->status =
					AMD_ADDBITS(
					dev->ep[UDC_EP0OUT_IX].td->status,
					UDC_DMA_OUT_STS_BS_HOST_READY,
					UDC_DMA_OUT_STS_BS);
				/* enable RDE */
				udc_ep0_set_rde(dev);
				ret_val = IRQ_HANDLED;

			} else {
				/* control write */
				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
				/* re-program desc. pointer for possible ZLPs */
				writel(dev->ep[UDC_EP0OUT_IX].td_phys,
					&dev->ep[UDC_EP0OUT_IX].regs->desptr);
				/* enable RDE */
				udc_ep0_set_rde(dev);
			}
		} else {
			/* number of received bytes */
			count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
			count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE);
			/* out data for fifo mode not working */
			count = 0;

			/* 0 packet or real data ? */
			if (count != 0) {
				ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
			} else {
				/* dummy read confirm */
				readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
				ret_val = IRQ_HANDLED;
			}
		}
	}

	/* check pending CNAKS */
	if (cnak_pending) {
		/* CNAK processing only when rxfifo is empty */
		if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
			udc_process_cnak_queue(dev);
	}

finished:
	return ret_val;
}
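/*
 * Return convention of the gadget setup() call above: a negative value
 * asks for a protocol STALL on ep0, non-negative means the request was
 * accepted. This driver additionally treats returns of
 * UDC_EP0IN_MAXPACKET or more as "data stage still in flight" and then
 * defers the ep0in CNAK until the host's zero-length ACK arrives
 * (tracked by waiting_zlp_ack_ep0in).
 */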

/* Interrupt handler for Control IN traffic */
static irqreturn_t udc_control_in_isr(struct udc *dev)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	struct udc_ep *ep;
	struct udc_request *req;
	unsigned len;

	ep = &dev->ep[UDC_EP0IN_IX];

	/* clear irq */
	writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts);

	tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
	/* DMA completion */
	if (tmp & AMD_BIT(UDC_EPSTS_TDC)) {
		VDBG(dev, "isr: TDC clear\n");
		ret_val = IRQ_HANDLED;

		/* clear TDC bit */
		writel(AMD_BIT(UDC_EPSTS_TDC),
			&dev->ep[UDC_EP0IN_IX].regs->sts);

	/* status reg has IN bit set ? */
	} else if (tmp & AMD_BIT(UDC_EPSTS_IN)) {
		ret_val = IRQ_HANDLED;

		if (ep->dma) {
			/* clear IN bit */
			writel(AMD_BIT(UDC_EPSTS_IN),
				&dev->ep[UDC_EP0IN_IX].regs->sts);
		}
		if (dev->stall_ep0in) {
			DBG(dev, "stall ep0in\n");
			/* halt ep0in */
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
		} else {
			if (!list_empty(&ep->queue)) {
				/* next request */
				req = list_entry(ep->queue.next,
						struct udc_request, queue);

				if (ep->dma) {
					/* write desc pointer */
					writel(req->td_phys, &ep->regs->desptr);
					/* set HOST READY */
					req->td_data->status =
						AMD_ADDBITS(
						req->td_data->status,
						UDC_DMA_STP_STS_BS_HOST_READY,
						UDC_DMA_STP_STS_BS);

					/* set poll demand bit */
					tmp =
					readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
					tmp |= AMD_BIT(UDC_EPCTL_P);
					writel(tmp,
					&dev->ep[UDC_EP0IN_IX].regs->ctl);

					/* all bytes will be transferred */
					req->req.actual = req->req.length;

					/* complete req */
					complete_req(ep, req, 0);

				} else {
					/* write fifo */
					udc_txfifo_write(ep, &req->req);

					/* length bytes transferred */
					len = req->req.length - req->req.actual;
					if (len > ep->ep.maxpacket)
						len = ep->ep.maxpacket;

					req->req.actual += len;
					if (req->req.actual == req->req.length
						|| (len != ep->ep.maxpacket)) {
						/* complete req */
						complete_req(ep, req, 0);
					}
				}

			}
		}
		ep->halted = 0;
		dev->stall_ep0in = 0;
		if (!ep->dma) {
			/* clear IN bit */
			writel(AMD_BIT(UDC_EPSTS_IN),
				&dev->ep[UDC_EP0IN_IX].regs->sts);
		}
	}

	return ret_val;
}


/* Interrupt handler for global device events */
static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq)
__releases(dev->lock)
__acquires(dev->lock)
{
	irqreturn_t ret_val = IRQ_NONE;
	u32 tmp;
	u32 cfg;
	struct udc_ep *ep;
	u16 i;
	u8 udc_csr_epix;

	/* SET_CONFIG irq ? */
	if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) {
		ret_val = IRQ_HANDLED;

		/* read config value */
		tmp = readl(&dev->regs->sts);
		cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG);
		DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg);
		dev->cur_config = cfg;
		dev->set_cfg_not_acked = 1;

		/* make usb request for gadget driver */
		memset(&setup_data, 0, sizeof(union udc_setup_data));
		setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
		setup_data.request.wValue = cpu_to_le16(dev->cur_config);

		/* program the NE registers */
		for (i = 0; i < UDC_EP_NUM; i++) {
			ep = &dev->ep[i];
			if (ep->in) {
				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num;

			/* OUT ep */
			} else {
				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
			}

			tmp = readl(&dev->csr->ne[udc_csr_epix]);
			/* ep cfg */
			tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
						UDC_CSR_NE_CFG);
			/* write reg */
			writel(tmp, &dev->csr->ne[udc_csr_epix]);

			/* clear stall bits */
			ep->halted = 0;
			tmp = readl(&ep->regs->ctl);
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
		}
		/* call gadget driver with setup data received */
		spin_unlock(&dev->lock);
		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
		spin_lock(&dev->lock);

	} /* SET_INTERFACE ? */
	if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) {
		ret_val = IRQ_HANDLED;

		dev->set_cfg_not_acked = 1;
		/* read interface and alt setting values */
		tmp = readl(&dev->regs->sts);
		dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT);
		dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF);

		/* make usb request for gadget driver */
		memset(&setup_data, 0, sizeof(union udc_setup_data));
		setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
		setup_data.request.bRequestType = USB_RECIP_INTERFACE;
		setup_data.request.wValue = cpu_to_le16(dev->cur_alt);
		setup_data.request.wIndex = cpu_to_le16(dev->cur_intf);

		DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
			dev->cur_alt, dev->cur_intf);

		/* program the NE registers */
		for (i = 0; i < UDC_EP_NUM; i++) {
			ep = &dev->ep[i];
			if (ep->in) {
				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num;

			/* OUT ep */
			} else {
				/* ep ix in UDC CSR register space */
				udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
			}

			/* UDC CSR reg */
			/* set ep values */
			tmp = readl(&dev->csr->ne[udc_csr_epix]);
			/* ep interface */
			tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
						UDC_CSR_NE_INTF);
			/* tmp = AMD_ADDBITS(tmp, 2, UDC_CSR_NE_INTF); */
			/* ep alt */
			tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
						UDC_CSR_NE_ALT);
			/* write reg */
			writel(tmp, &dev->csr->ne[udc_csr_epix]);

			/* clear stall bits */
			ep->halted = 0;
			tmp = readl(&ep->regs->ctl);
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
		}

		/* call gadget driver with setup data received */
		spin_unlock(&dev->lock);
		tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
		spin_lock(&dev->lock);

	} /* USB reset */
	if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
		DBG(dev, "USB Reset interrupt\n");
		ret_val = IRQ_HANDLED;

		/* allow soft reset when suspend occurs */
		soft_reset_occured = 0;

		dev->waiting_zlp_ack_ep0in = 0;
		dev->set_cfg_not_acked = 0;

		/* mask not needed interrupts */
		udc_mask_unused_interrupts(dev);

		/* call gadget to resume and reset configs etc. */
		spin_unlock(&dev->lock);
		if (dev->sys_suspended && dev->driver->resume) {
			dev->driver->resume(&dev->gadget);
			dev->sys_suspended = 0;
		}
		usb_gadget_udc_reset(&dev->gadget, dev->driver);
		spin_lock(&dev->lock);

		/* disable ep0 to empty req queue */
		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);

		/* soft reset when rxfifo not empty */
		tmp = readl(&dev->regs->sts);
		if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
				&& !soft_reset_after_usbreset_occured) {
			udc_soft_reset(dev);
			soft_reset_after_usbreset_occured++;
		}

		/*
		 * DMA reset to kill potential old DMA hw hang,
		 * POLL bit is already reset by ep_init() through
		 * disconnect()
		 */
		DBG(dev, "DMA machine reset\n");
		tmp = readl(&dev->regs->cfg);
		writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
		writel(tmp, &dev->regs->cfg);

		/* put into initial config */
		udc_basic_init(dev);

		/* enable device setup interrupts */
		udc_enable_dev_setup_interrupts(dev);

		/* enable suspend interrupt */
		tmp = readl(&dev->regs->irqmsk);
		tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
		writel(tmp, &dev->regs->irqmsk);

	} /* USB suspend */
	if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
		DBG(dev, "USB Suspend interrupt\n");
		ret_val = IRQ_HANDLED;
		if (dev->driver->suspend) {
			spin_unlock(&dev->lock);
			dev->sys_suspended = 1;
			dev->driver->suspend(&dev->gadget);
			spin_lock(&dev->lock);
		}
	} /* new speed ? */
	if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
		DBG(dev, "ENUM interrupt\n");
		ret_val = IRQ_HANDLED;
		soft_reset_after_usbreset_occured = 0;

		/* disable ep0 to empty req queue */
		empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
		ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);

		/* link up all endpoints */
		udc_setup_endpoints(dev);
		dev_info(&dev->pdev->dev, "Connect: %s\n",
			usb_speed_string(dev->gadget.speed));

		/* init ep 0 */
		activate_control_endpoints(dev);

		/* enable ep0 interrupts */
		udc_enable_ep0_interrupts(dev);
	}
	/* session valid change interrupt */
	if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
		DBG(dev, "USB SVC interrupt\n");
		ret_val = IRQ_HANDLED;

		/* check that session is not valid to detect disconnect */
		tmp = readl(&dev->regs->sts);
		if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
			/* disable suspend interrupt */
			tmp = readl(&dev->regs->irqmsk);
			tmp |= AMD_BIT(UDC_DEVINT_US);
			writel(tmp, &dev->regs->irqmsk);
			DBG(dev, "USB Disconnect (session valid low)\n");
			/* cleanup on disconnect */
			usb_disconnect(udc);
		}
	}

	return ret_val;
}
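/*
 * Why the two fabricated requests above exist: the controller decodes
 * SET_CONFIGURATION and SET_INTERFACE in hardware and only raises the
 * SC/SI interrupts, so the gadget driver never sees the original
 * control transfer. The ISR therefore rebuilds an equivalent request in
 * setup_data and feeds it to dev->driver->setup() as if it had arrived
 * on ep0.
 */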

/* Interrupt Service Routine, see Linux Kernel Doc for parameters */
static irqreturn_t udc_irq(int irq, void *pdev)
{
	struct udc *dev = pdev;
	u32 reg;
	u16 i;
	u32 ep_irq;
	irqreturn_t ret_val = IRQ_NONE;

	spin_lock(&dev->lock);

	/* check for ep irq */
	reg = readl(&dev->regs->ep_irqsts);
	if (reg) {
		if (reg & AMD_BIT(UDC_EPINT_OUT_EP0))
			ret_val |= udc_control_out_isr(dev);
		if (reg & AMD_BIT(UDC_EPINT_IN_EP0))
			ret_val |= udc_control_in_isr(dev);

		/*
		 * data endpoint
		 * iterate ep's
		 */
		for (i = 1; i < UDC_EP_NUM; i++) {
			ep_irq = 1 << i;
			if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0)
				continue;

			/* clear irq status */
			writel(ep_irq, &dev->regs->ep_irqsts);

			/* irq for out ep ? */
			if (i > UDC_EPIN_NUM)
				ret_val |= udc_data_out_isr(dev, i);
			else
				ret_val |= udc_data_in_isr(dev, i);
		}

	}

	/* check for dev irq */
	reg = readl(&dev->regs->irqsts);
	if (reg) {
		/* clear irq */
		writel(reg, &dev->regs->irqsts);
		ret_val |= udc_dev_isr(dev, reg);
	}

	spin_unlock(&dev->lock);
	return ret_val;
}
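/*
 * The handler above is registered with IRQF_SHARED (see
 * udc_pci_probe() below), so returning IRQ_NONE when neither an
 * endpoint nor a device interrupt bit was set lets the kernel offer
 * the interrupt to the other handlers sharing the line.
 */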

/* Tears down device */
static void gadget_release(struct device *pdev)
{
	struct amd5536udc *dev = dev_get_drvdata(pdev);

	kfree(dev);
}

/* Cleanup on device remove */
static void udc_remove(struct udc *dev)
{
	/* remove timer */
	stop_timer++;
	if (timer_pending(&udc_timer))
		wait_for_completion(&on_exit);
	if (udc_timer.data)
		del_timer_sync(&udc_timer);
	/* remove pollstall timer */
	stop_pollstall_timer++;
	if (timer_pending(&udc_pollstall_timer))
		wait_for_completion(&on_pollstall_exit);
	if (udc_pollstall_timer.data)
		del_timer_sync(&udc_pollstall_timer);
	udc = NULL;
}

/* free all the dma pools */
static void free_dma_pools(struct udc *dev)
{
	dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td,
		      dev->ep[UDC_EP0OUT_IX].td_phys);
	dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td_stp,
		      dev->ep[UDC_EP0OUT_IX].td_stp_dma);
	dma_pool_destroy(dev->stp_requests);
	dma_pool_destroy(dev->data_requests);
}

/* Reset all pci context */
static void udc_pci_remove(struct pci_dev *pdev)
{
	struct udc *dev;

	dev = pci_get_drvdata(pdev);

	usb_del_gadget_udc(&udc->gadget);
	/* gadget driver must not be registered */
	if (WARN_ON(dev->driver))
		return;

	/* dma pool cleanup */
	free_dma_pools(dev);

	/* reset controller */
	writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
	free_irq(pdev->irq, dev);
	iounmap(dev->virt_addr);
	release_mem_region(pci_resource_start(pdev, 0),
			   pci_resource_len(pdev, 0));
	pci_disable_device(pdev);

	udc_remove(dev);
}

/* create dma pools on init */
static int init_dma_pools(struct udc *dev)
{
	struct udc_stp_dma *td_stp;
	struct udc_data_dma *td_data;
	int retval;

	/* consistent DMA mode setting ? */
	if (use_dma_ppb) {
		use_dma_bufferfill_mode = 0;
	} else {
		use_dma_ppb_du = 0;
		use_dma_bufferfill_mode = 1;
	}

	/* DMA setup */
	dev->data_requests = dma_pool_create("data_requests", NULL,
		sizeof(struct udc_data_dma), 0, 0);
	if (!dev->data_requests) {
		DBG(dev, "can't get request data pool\n");
		return -ENOMEM;
	}

	/* EP0 in dma regs = dev control regs */
	dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;

	/* dma desc for setup data */
	dev->stp_requests = dma_pool_create("setup requests", NULL,
		sizeof(struct udc_stp_dma), 0, 0);
	if (!dev->stp_requests) {
		DBG(dev, "can't get stp request pool\n");
		retval = -ENOMEM;
		goto err_create_dma_pool;
	}
	/* setup */
	td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
				&dev->ep[UDC_EP0OUT_IX].td_stp_dma);
	if (td_stp == NULL) {
		retval = -ENOMEM;
		goto err_alloc_dma;
	}
	dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;

	/* data: 0 packets !? */
	td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
				&dev->ep[UDC_EP0OUT_IX].td_phys);
	if (td_data == NULL) {
		retval = -ENOMEM;
		goto err_alloc_phys;
	}
	dev->ep[UDC_EP0OUT_IX].td = td_data;
	return 0;

err_alloc_phys:
	dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td_stp,
		      dev->ep[UDC_EP0OUT_IX].td_stp_dma);
err_alloc_dma:
	dma_pool_destroy(dev->stp_requests);
	dev->stp_requests = NULL;
err_create_dma_pool:
	dma_pool_destroy(dev->data_requests);
	dev->data_requests = NULL;
	return retval;
}
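/*
 * dma_pool_create(name, dev, size, align, boundary) builds a pool of
 * fixed-size coherent DMA blocks; dma_pool_alloc() then returns one
 * block plus its bus address through the last argument. Passing NULL
 * as the device, as done above, is legacy style; a sketch of the
 * per-device form this driver could use instead:
 *
 *	pool = dma_pool_create("data_requests", &dev->pdev->dev,
 *			       sizeof(struct udc_data_dma), 0, 0);
 */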

/* general probe */
static int udc_probe(struct udc *dev)
{
	char tmp[128];
	u32 reg;
	int retval;

	/* mark timer as not initialized */
	udc_timer.data = 0;
	udc_pollstall_timer.data = 0;

	/* device struct setup */
	dev->gadget.ops = &udc_ops;

	dev_set_name(&dev->gadget.dev, "gadget");
	dev->gadget.name = name;
	dev->gadget.max_speed = USB_SPEED_HIGH;

	/* init registers, interrupts, ... */
	startup_registers(dev);

	dev_info(&dev->pdev->dev, "%s\n", mod_desc);

	snprintf(tmp, sizeof(tmp), "%d", dev->irq);
	dev_info(&dev->pdev->dev,
		"irq %s, pci mem %08lx, chip rev %02x(Geode5536 %s)\n",
		tmp, dev->phys_addr, dev->chiprev,
		(dev->chiprev == UDC_HSA0_REV) ? "A0" : "B1");
	strcpy(tmp, UDC_DRIVER_VERSION_STRING);
	if (dev->chiprev == UDC_HSA0_REV) {
		dev_err(&dev->pdev->dev, "chip revision is A0; too old\n");
		retval = -ENODEV;
		goto finished;
	}
	dev_info(&dev->pdev->dev,
		"driver version: %s(for Geode5536 B1)\n", tmp);
	udc = dev;

	retval = usb_add_gadget_udc_release(&udc->pdev->dev, &dev->gadget,
					    gadget_release);
	if (retval)
		goto finished;

	/* timer init */
	init_timer(&udc_timer);
	udc_timer.function = udc_timer_function;
	udc_timer.data = 1;
	/* timer pollstall init */
	init_timer(&udc_pollstall_timer);
	udc_pollstall_timer.function = udc_pollstall_timer_function;
	udc_pollstall_timer.data = 1;

	/* set SD */
	reg = readl(&dev->regs->ctl);
	reg |= AMD_BIT(UDC_DEVCTL_SD);
	writel(reg, &dev->regs->ctl);

	/* print dev register info */
	print_regs(dev);

	return 0;

finished:
	return retval;
}
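/*
 * The .data field of both timers doubles as an "initialized" flag:
 * udc_probe() zeroes it before setup and sets it to 1 once the callback
 * is wired up, and udc_remove() only calls del_timer_sync() when the
 * flag is set. Note also that usb_add_gadget_udc_release() hands
 * ownership of the udc struct to gadget_release(), which frees it when
 * the gadget device is dropped.
 */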

/* Called by pci bus driver to init pci context */
static int udc_pci_probe(
	struct pci_dev *pdev,
	const struct pci_device_id *id
)
{
	struct udc *dev;
	unsigned long resource;
	unsigned long len;
	int retval = 0;

	/* one udc only */
	if (udc) {
		dev_dbg(&pdev->dev, "already probed\n");
		return -EBUSY;
	}

	/* init */
	dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
	if (!dev)
		return -ENOMEM;

	/* pci setup */
	if (pci_enable_device(pdev) < 0) {
		retval = -ENODEV;
		goto err_pcidev;
	}

	/* PCI resource allocation */
	resource = pci_resource_start(pdev, 0);
	len = pci_resource_len(pdev, 0);

	if (!request_mem_region(resource, len, name)) {
		dev_dbg(&pdev->dev, "pci device used already\n");
		retval = -EBUSY;
		goto err_memreg;
	}

	dev->virt_addr = ioremap_nocache(resource, len);
	if (dev->virt_addr == NULL) {
		dev_dbg(&pdev->dev, "start address cannot be mapped\n");
		retval = -EFAULT;
		goto err_ioremap;
	}

	if (!pdev->irq) {
		dev_err(&pdev->dev, "irq not set\n");
		retval = -ENODEV;
		goto err_irq;
	}

	spin_lock_init(&dev->lock);
	/* udc csr registers base */
	dev->csr = dev->virt_addr + UDC_CSR_ADDR;
	/* dev registers base */
	dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;
	/* ep registers base */
	dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR;
	/* fifo's base */
	dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR);
	dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);

	if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
		dev_dbg(&pdev->dev, "request_irq(%d) fail\n", pdev->irq);
		retval = -EBUSY;
		goto err_irq;
	}

	pci_set_drvdata(pdev, dev);

	/* chip revision for Hs AMD5536 */
	dev->chiprev = pdev->revision;

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	/* init dma pools */
	if (use_dma) {
		retval = init_dma_pools(dev);
		if (retval != 0)
			goto err_dma;
	}

	dev->phys_addr = resource;
	dev->irq = pdev->irq;
	dev->pdev = pdev;

	/* general probing */
	if (udc_probe(dev)) {
		retval = -ENODEV;
		goto err_probe;
	}
	return 0;

err_probe:
	if (use_dma)
		free_dma_pools(dev);
err_dma:
	free_irq(pdev->irq, dev);
err_irq:
	iounmap(dev->virt_addr);
err_ioremap:
	release_mem_region(resource, len);
err_memreg:
	pci_disable_device(pdev);
err_pcidev:
	kfree(dev);
	return retval;
}
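/*
 * The error labels above unwind strictly in reverse order of
 * acquisition (probe -> dma pools -> irq -> ioremap -> mem region ->
 * pci enable -> kzalloc), so a failure at any step releases exactly
 * what was already taken.
 */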

/* PCI device parameters */
static const struct pci_device_id pci_id[] = {
	{
		PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096),
		.class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
		.class_mask = 0xffffffff,
	},
	{},
};
MODULE_DEVICE_TABLE(pci, pci_id);

/* PCI functions */
static struct pci_driver udc_pci_driver = {
	.name = (char *) name,
	.id_table = pci_id,
	.probe = udc_pci_probe,
	.remove = udc_pci_remove,
};

module_pci_driver(udc_pci_driver);

MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
MODULE_AUTHOR("Thomas Dahlmann");
MODULE_LICENSE("GPL");