usb: gadget: amd5536udc: remove forward declaration of udc_remote_wakeup
drivers/usb/gadget/udc/amd5536udc.c (linux-2.6-block.git)
/*
 * amd5536.c -- AMD 5536 UDC high/full speed USB device controller
 *
 * Copyright (C) 2005-2007 AMD (http://www.amd.com)
 * Author: Thomas Dahlmann
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

/*
 * The AMD5536 UDC is part of the x86 southbridge AMD Geode CS5536.
 * It is a USB Highspeed DMA capable USB device controller. Besides ep0
 * it provides 4 IN and 4 OUT endpoints (bulk or interrupt type).
 *
 * Make sure that the UDC is assigned to port 4 by BIOS settings (the port
 * can also be used as a host port) and that the UOC bits PAD_EN and APU
 * are set (should be done by BIOS init).
 *
 * UDC DMA requires 32-bit aligned buffers, so DMA with gadget ether does
 * not work without updating NET_IP_ALIGN. Alternatively, PIO mode (module
 * param "use_dma=0") can be used with gadget ether.
 */
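
/*
 * Illustrative note (assumption, not part of the original file): with the
 * default NET_IP_ALIGN of 2, gadget ether reserves two bytes at the start
 * of each rx buffer, so the buffers handed to this driver begin at an
 * address that is 2 mod 4 -- not 32-bit aligned -- which is why DMA mode
 * needs NET_IP_ALIGN changed (e.g. to 0) or PIO mode used instead.
 */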

/* debug control */
/* #define UDC_VERBOSE */

/* Driver strings */
#define UDC_MOD_DESCRIPTION		"AMD 5536 UDC - USB Device Controller"
#define UDC_DRIVER_VERSION_STRING	"01.00.0206"

/* system */
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/ioctl.h>
#include <linux/fs.h>
#include <linux/dmapool.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/prefetch.h>

#include <asm/byteorder.h>
#include <asm/unaligned.h>

/* gadget stack */
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>

/* udc specific */
#include "amd5536udc.h"


static void udc_tasklet_disconnect(unsigned long);
static void empty_req_queue(struct udc_ep *);
static void udc_basic_init(struct udc *dev);
static void udc_setup_endpoints(struct udc *dev);
static void udc_soft_reset(struct udc *dev);
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
static void udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq);
static int udc_free_dma_chain(struct udc *dev, struct udc_request *req);
static int udc_create_dma_chain(struct udc_ep *ep, struct udc_request *req,
				unsigned long buf_len, gfp_t gfp_flags);
static int udc_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id);
static void udc_pci_remove(struct pci_dev *pdev);

/* description */
static const char mod_desc[] = UDC_MOD_DESCRIPTION;
static const char name[] = "amd5536udc";

/* structure to hold endpoint function pointers */
static const struct usb_ep_ops udc_ep_ops;

/* received setup data */
static union udc_setup_data setup_data;

/* pointer to device object */
static struct udc *udc;

/* irq spin lock for soft reset */
static DEFINE_SPINLOCK(udc_irq_spinlock);
/* stall spin lock */
static DEFINE_SPINLOCK(udc_stall_spinlock);

/*
 * slave mode: pending bytes in rx fifo after nyet,
 * used if EPIN irq came but no req was available
 */
static unsigned int udc_rxfifo_pending;

/* count soft resets after suspend to avoid loop */
static int soft_reset_occured;
static int soft_reset_after_usbreset_occured;

/* timer */
static struct timer_list udc_timer;
static int stop_timer;
/* set_rde -- Is used to control enabling of RX DMA. Problem is
 * that UDC has only one bit (RDE) to enable/disable RX DMA for
 * all OUT endpoints. So we have to handle race conditions like
 * when OUT data reaches the fifo but no request was queued yet.
 * This cannot be solved by leaving the RX DMA disabled until a
 * request gets queued because there may be other OUT packets
 * in the FIFO (important for not blocking control traffic).
 * The value of set_rde controls the corresponding timer.
 *
 * set_rde -1 == not used, means it is allowed to be set to 0 or 1
 * set_rde  0 == do not touch RDE, do not start the RDE timer
 * set_rde  1 == timer function will look whether FIFO has data
 * set_rde  2 == set by timer function to enable RX DMA on next call
 */
static int set_rde = -1;

static DECLARE_COMPLETION(on_exit);
static struct timer_list udc_pollstall_timer;
static int stop_pollstall_timer;
static DECLARE_COMPLETION(on_pollstall_exit);

/* tasklet for usb disconnect */
static DECLARE_TASKLET(disconnect_tasklet, udc_tasklet_disconnect,
		(unsigned long) &udc);

/* endpoint names used for print */
static const char ep0_string[] = "ep0in";
static const struct {
	const char *name;
	const struct usb_ep_caps caps;
} ep_info[] = {
#define EP_INFO(_name, _caps) \
	{ \
		.name = _name, \
		.caps = _caps, \
	}

	EP_INFO(ep0_string,
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep1in-int",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep2in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep3in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep4in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep5in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep6in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep7in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep8in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep9in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep10in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep11in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep12in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep13in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep14in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep15in-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)),
	EP_INFO("ep0out",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep1out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep2out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep3out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep4out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep5out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep6out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep7out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep8out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep9out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep10out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep11out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep12out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep13out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep14out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),
	EP_INFO("ep15out-bulk",
		USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_OUT)),

#undef EP_INFO
};

/* DMA usage flag */
static bool use_dma = 1;
/* packet per buffer dma */
static bool use_dma_ppb = 1;
/* with per descr. update */
static bool use_dma_ppb_du;
/* buffer fill mode */
static int use_dma_bufferfill_mode;
/* full speed only mode */
static bool use_fullspeed;
/* tx buffer size for high speed */
static unsigned long hs_tx_buf = UDC_EPIN_BUFF_SIZE;

/* module parameters */
module_param(use_dma, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma, "true for DMA");
module_param(use_dma_ppb, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma_ppb, "true for DMA in packet per buffer mode");
module_param(use_dma_ppb_du, bool, S_IRUGO);
MODULE_PARM_DESC(use_dma_ppb_du,
	"true for DMA in packet per buffer mode with descriptor update");
module_param(use_fullspeed, bool, S_IRUGO);
MODULE_PARM_DESC(use_fullspeed, "true for fullspeed only");
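
/*
 * Illustrative usage (assumed typical invocations, not from the original
 * file): the DMA behaviour above is chosen at module load time, e.g.
 *
 *	modprobe amd5536udc use_dma=1 use_dma_ppb=1
 *
 * or, for PIO-only operation (e.g. with gadget ether):
 *
 *	modprobe amd5536udc use_dma=0
 */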

/*---------------------------------------------------------------------------*/
/* Prints UDC device registers and endpoint irq registers */
static void print_regs(struct udc *dev)
{
	DBG(dev, "------- Device registers -------\n");
	DBG(dev, "dev config = %08x\n", readl(&dev->regs->cfg));
	DBG(dev, "dev control = %08x\n", readl(&dev->regs->ctl));
	DBG(dev, "dev status = %08x\n", readl(&dev->regs->sts));
	DBG(dev, "\n");
	DBG(dev, "dev int's = %08x\n", readl(&dev->regs->irqsts));
	DBG(dev, "dev intmask = %08x\n", readl(&dev->regs->irqmsk));
	DBG(dev, "\n");
	DBG(dev, "dev ep int's = %08x\n", readl(&dev->regs->ep_irqsts));
	DBG(dev, "dev ep intmask = %08x\n", readl(&dev->regs->ep_irqmsk));
	DBG(dev, "\n");
	DBG(dev, "USE DMA = %d\n", use_dma);
	if (use_dma && use_dma_ppb && !use_dma_ppb_du) {
		DBG(dev, "DMA mode = PPBNDU (packet per buffer "
			"WITHOUT desc. update)\n");
		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBNDU");
	} else if (use_dma && use_dma_ppb && use_dma_ppb_du) {
		DBG(dev, "DMA mode = PPBDU (packet per buffer "
			"WITH desc. update)\n");
		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "PPBDU");
	}
	if (use_dma && use_dma_bufferfill_mode) {
		DBG(dev, "DMA mode = BF (buffer fill mode)\n");
		dev_info(&dev->pdev->dev, "DMA mode (%s)\n", "BF");
	}
	if (!use_dma)
		dev_info(&dev->pdev->dev, "FIFO mode\n");
	DBG(dev, "-------------------------------------------------------\n");
}

/* Masks unused interrupts */
static int udc_mask_unused_interrupts(struct udc *dev)
{
	u32 tmp;

	/* mask all dev interrupts */
	tmp =	AMD_BIT(UDC_DEVINT_SVC) |
		AMD_BIT(UDC_DEVINT_ENUM) |
		AMD_BIT(UDC_DEVINT_US) |
		AMD_BIT(UDC_DEVINT_UR) |
		AMD_BIT(UDC_DEVINT_ES) |
		AMD_BIT(UDC_DEVINT_SI) |
		AMD_BIT(UDC_DEVINT_SOF) |
		AMD_BIT(UDC_DEVINT_SC);
	writel(tmp, &dev->regs->irqmsk);

	/* mask all ep interrupts */
	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqmsk);

	return 0;
}
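
/*
 * The AMD_BIT()/AMD_UNMASK_BIT() helpers used above come from
 * amd5536udc.h; a minimal sketch of their intent (assumed shape, see the
 * header for the real definitions):
 *
 *	#define AMD_BIT(bit)		(1 << (bit))
 *	#define AMD_UNMASK_BIT(bit)	(~AMD_BIT(bit))
 *
 * so setting AMD_BIT(x) in irqmsk masks interrupt x, and ANDing with
 * AMD_UNMASK_BIT(x) clears the mask bit, i.e. enables interrupt x.
 */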

/* Enables endpoint 0 interrupts */
static int udc_enable_ep0_interrupts(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "udc_enable_ep0_interrupts()\n");

	/* read irq mask */
	tmp = readl(&dev->regs->ep_irqmsk);
	/* enable ep0 irq's */
	tmp &= AMD_UNMASK_BIT(UDC_EPINT_IN_EP0)
		& AMD_UNMASK_BIT(UDC_EPINT_OUT_EP0);
	writel(tmp, &dev->regs->ep_irqmsk);

	return 0;
}

/* Enables device interrupts for SET_INTF and SET_CONFIG */
static int udc_enable_dev_setup_interrupts(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "enable device interrupts for setup data\n");

	/* read irq mask */
	tmp = readl(&dev->regs->irqmsk);

	/* enable SET_INTERFACE, SET_CONFIG and other needed irq's */
	tmp &= AMD_UNMASK_BIT(UDC_DEVINT_SI)
		& AMD_UNMASK_BIT(UDC_DEVINT_SC)
		& AMD_UNMASK_BIT(UDC_DEVINT_UR)
		& AMD_UNMASK_BIT(UDC_DEVINT_SVC)
		& AMD_UNMASK_BIT(UDC_DEVINT_ENUM);
	writel(tmp, &dev->regs->irqmsk);

	return 0;
}

/* Calculates fifo start of endpoint based on preceding endpoints */
static int udc_set_txfifo_addr(struct udc_ep *ep)
{
	struct udc *dev;
	u32 tmp;
	int i;

	if (!ep || !(ep->in))
		return -EINVAL;

	dev = ep->dev;
	ep->txfifo = dev->txfifo;

	/* traverse ep's */
	for (i = 0; i < ep->num; i++) {
		if (dev->ep[i].regs) {
			/* read fifo size */
			tmp = readl(&dev->ep[i].regs->bufin_framenum);
			tmp = AMD_GETBITS(tmp, UDC_EPIN_BUFF_SIZE);
			ep->txfifo += tmp;
		}
	}
	return 0;
}
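
/*
 * Worked example (illustrative): the buffer size field read above counts
 * dwords and ep->txfifo is a u32 __iomem pointer, so "ep->txfifo += tmp"
 * advances the base by tmp dwords. If ep1in and ep2in each occupy 256
 * dwords, ep3in's fifo base becomes dev->txfifo + 512 dwords: each IN
 * ep's fifo starts directly after those of the lower-numbered IN eps.
 */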

/* CNAK pending field: bit0 = ep0in, bit16 = ep0out */
static u32 cnak_pending;

static void UDC_QUEUE_CNAK(struct udc_ep *ep, unsigned num)
{
	if (readl(&ep->regs->ctl) & AMD_BIT(UDC_EPCTL_NAK)) {
		DBG(ep->dev, "NAK could not be cleared for ep%d\n", num);
		cnak_pending |= 1 << (num);
		ep->naking = 1;
	} else
		cnak_pending = cnak_pending & (~(1 << (num)));
}


/* Enables endpoint, is called by gadget driver */
static int
udc_ep_enable(struct usb_ep *usbep, const struct usb_endpoint_descriptor *desc)
{
	struct udc_ep *ep;
	struct udc *dev;
	u32 tmp;
	unsigned long iflags;
	u8 udc_csr_epix;
	unsigned maxpacket;

	if (!usbep
			|| usbep->name == ep0_string
			|| !desc
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	dev = ep->dev;

	DBG(dev, "udc_ep_enable() ep %d\n", ep->num);

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&dev->lock, iflags);
	ep->ep.desc = desc;

	ep->halted = 0;

	/* set traffic type */
	tmp = readl(&dev->ep[ep->num].regs->ctl);
	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_EPCTL_ET);
	writel(tmp, &dev->ep[ep->num].regs->ctl);

	/* set max packet size */
	maxpacket = usb_endpoint_maxp(desc);
	tmp = readl(&dev->ep[ep->num].regs->bufout_maxpkt);
	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_EP_MAX_PKT_SIZE);
	ep->ep.maxpacket = maxpacket;
	writel(tmp, &dev->ep[ep->num].regs->bufout_maxpkt);

	/* IN ep */
	if (ep->in) {

		/* ep ix in UDC CSR register space */
		udc_csr_epix = ep->num;

		/* set buffer size (tx fifo entries) */
		tmp = readl(&dev->ep[ep->num].regs->bufin_framenum);
		/* double buffering: fifo size = 2 x max packet size */
		tmp = AMD_ADDBITS(
				tmp,
				maxpacket * UDC_EPIN_BUFF_SIZE_MULT
					/ UDC_DWORD_BYTES,
				UDC_EPIN_BUFF_SIZE);
		writel(tmp, &dev->ep[ep->num].regs->bufin_framenum);

		/* calc. tx fifo base addr */
		udc_set_txfifo_addr(ep);

		/* flush fifo */
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_F);
		writel(tmp, &ep->regs->ctl);

	/* OUT ep */
	} else {
		/* ep ix in UDC CSR register space */
		udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;

		/* set max packet size UDC CSR */
		tmp = readl(&dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);
		tmp = AMD_ADDBITS(tmp, maxpacket,
					UDC_CSR_NE_MAX_PKT);
		writel(tmp, &dev->csr->ne[ep->num - UDC_CSR_EP_OUT_IX_OFS]);

		if (use_dma && !ep->in) {
			/* alloc and init BNA dummy request */
			ep->bna_dummy_req = udc_alloc_bna_dummy(ep);
			ep->bna_occurred = 0;
		}

		if (ep->num != UDC_EP0OUT_IX)
			dev->data_ep_enabled = 1;
	}

	/* set ep values */
	tmp = readl(&dev->csr->ne[udc_csr_epix]);
	/* max packet */
	tmp = AMD_ADDBITS(tmp, maxpacket, UDC_CSR_NE_MAX_PKT);
	/* ep number */
	tmp = AMD_ADDBITS(tmp, desc->bEndpointAddress, UDC_CSR_NE_NUM);
	/* ep direction */
	tmp = AMD_ADDBITS(tmp, ep->in, UDC_CSR_NE_DIR);
	/* ep type */
	tmp = AMD_ADDBITS(tmp, desc->bmAttributes, UDC_CSR_NE_TYPE);
	/* ep config */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_config, UDC_CSR_NE_CFG);
	/* ep interface */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf, UDC_CSR_NE_INTF);
	/* ep alt */
	tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt, UDC_CSR_NE_ALT);
	/* write reg */
	writel(tmp, &dev->csr->ne[udc_csr_epix]);

	/* enable ep irq */
	tmp = readl(&dev->regs->ep_irqmsk);
	tmp &= AMD_UNMASK_BIT(ep->num);
	writel(tmp, &dev->regs->ep_irqmsk);

	/*
	 * clear NAK by writing CNAK
	 * avoid BNA for OUT DMA, don't clear NAK until DMA desc. written
	 */
	if (!use_dma || ep->in) {
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_CNAK);
		writel(tmp, &ep->regs->ctl);
		ep->naking = 0;
		UDC_QUEUE_CNAK(ep, ep->num);
	}
	tmp = desc->bEndpointAddress;
	DBG(dev, "%s enabled\n", usbep->name);

	spin_unlock_irqrestore(&dev->lock, iflags);
	return 0;
}

/* Resets endpoint */
static void ep_init(struct udc_regs __iomem *regs, struct udc_ep *ep)
{
	u32 tmp;

	VDBG(ep->dev, "ep-%d reset\n", ep->num);
	ep->ep.desc = NULL;
	ep->ep.ops = &udc_ep_ops;
	INIT_LIST_HEAD(&ep->queue);

	usb_ep_set_maxpacket_limit(&ep->ep, (u16) ~0);
	/* set NAK */
	tmp = readl(&ep->regs->ctl);
	tmp |= AMD_BIT(UDC_EPCTL_SNAK);
	writel(tmp, &ep->regs->ctl);
	ep->naking = 1;

	/* disable interrupt */
	tmp = readl(&regs->ep_irqmsk);
	tmp |= AMD_BIT(ep->num);
	writel(tmp, &regs->ep_irqmsk);

	if (ep->in) {
		/* unset P and IN bit of potential former DMA */
		tmp = readl(&ep->regs->ctl);
		tmp &= AMD_UNMASK_BIT(UDC_EPCTL_P);
		writel(tmp, &ep->regs->ctl);

		tmp = readl(&ep->regs->sts);
		tmp |= AMD_BIT(UDC_EPSTS_IN);
		writel(tmp, &ep->regs->sts);

		/* flush the fifo */
		tmp = readl(&ep->regs->ctl);
		tmp |= AMD_BIT(UDC_EPCTL_F);
		writel(tmp, &ep->regs->ctl);
	}
	/* reset desc pointer */
	writel(0, &ep->regs->desptr);
}

/* Disables endpoint, is called by gadget driver */
static int udc_ep_disable(struct usb_ep *usbep)
{
	struct udc_ep *ep = NULL;
	unsigned long iflags;

	if (!usbep)
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	if (usbep->name == ep0_string || !ep->ep.desc)
		return -EINVAL;

	DBG(ep->dev, "Disable ep-%d\n", ep->num);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	udc_free_request(&ep->ep, &ep->bna_dummy_req->req);
	empty_req_queue(ep);
	ep_init(ep->dev->regs, ep);
	spin_unlock_irqrestore(&ep->dev->lock, iflags);

	return 0;
}

/* Allocates request packet, called by gadget driver */
static struct usb_request *
udc_alloc_request(struct usb_ep *usbep, gfp_t gfp)
{
	struct udc_request *req;
	struct udc_data_dma *dma_desc;
	struct udc_ep *ep;

	if (!usbep)
		return NULL;

	ep = container_of(usbep, struct udc_ep, ep);

	VDBG(ep->dev, "udc_alloc_req(): ep%d\n", ep->num);
	req = kzalloc(sizeof(struct udc_request), gfp);
	if (!req)
		return NULL;

	req->req.dma = DMA_DONT_USE;
	INIT_LIST_HEAD(&req->queue);

	if (ep->dma) {
		/* ep0 in requests are allocated from data pool here */
		dma_desc = pci_pool_alloc(ep->dev->data_requests, gfp,
						&req->td_phys);
		if (!dma_desc) {
			kfree(req);
			return NULL;
		}

		VDBG(ep->dev, "udc_alloc_req: req = %p dma_desc = %p, "
				"td_phys = %lx\n",
				req, dma_desc,
				(unsigned long)req->td_phys);
		/* prevent from using desc. - set HOST BUSY */
		dma_desc->status = AMD_ADDBITS(dma_desc->status,
						UDC_DMA_STP_STS_BS_HOST_BUSY,
						UDC_DMA_STP_STS_BS);
		dma_desc->bufptr = cpu_to_le32(DMA_DONT_USE);
		req->td_data = dma_desc;
		req->td_data_last = NULL;
		req->chain_len = 1;
	}

	return &req->req;
}
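
/*
 * Note on descriptor ownership (background for the HOST BUSY marker
 * above): the buffer-status (BS) field of a DMA descriptor tells the
 * controller whether it may process the descriptor. Freshly allocated
 * descriptors are parked as HOST BUSY so the hardware leaves them alone;
 * prep_dma()/udc_queue() flip them to HOST READY only once bufptr and
 * the byte counts are valid.
 */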

/* Frees request packet, called by gadget driver */
static void
udc_free_request(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep *ep;
	struct udc_request *req;

	if (!usbep || !usbreq)
		return;

	ep = container_of(usbep, struct udc_ep, ep);
	req = container_of(usbreq, struct udc_request, req);
	VDBG(ep->dev, "free_req req=%p\n", req);
	BUG_ON(!list_empty(&req->queue));
	if (req->td_data) {
		VDBG(ep->dev, "req->td_data=%p\n", req->td_data);

		/* free dma chain if created */
		if (req->chain_len > 1)
			udc_free_dma_chain(ep->dev, req);

		pci_pool_free(ep->dev->data_requests, req->td_data,
				req->td_phys);
	}
	kfree(req);
}

/* Init BNA dummy descriptor for HOST BUSY and pointing to itself */
static void udc_init_bna_dummy(struct udc_request *req)
{
	if (req) {
		/* set last bit */
		req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* set next pointer to itself */
		req->td_data->next = req->td_phys;
		/* set HOST BUSY */
		req->td_data->status
			= AMD_ADDBITS(req->td_data->status,
					UDC_DMA_STP_STS_BS_DMA_DONE,
					UDC_DMA_STP_STS_BS);
#ifdef UDC_VERBOSE
		pr_debug("bna desc = %p, sts = %08x\n",
			req->td_data, req->td_data->status);
#endif
	}
}

/* Allocate BNA dummy descriptor */
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep)
{
	struct udc_request *req = NULL;
	struct usb_request *_req = NULL;

	/* alloc the dummy request */
	_req = udc_alloc_request(&ep->ep, GFP_ATOMIC);
	if (_req) {
		req = container_of(_req, struct udc_request, req);
		ep->bna_dummy_req = req;
		udc_init_bna_dummy(req);
	}
	return req;
}

/* Write data to TX fifo for IN packets */
static void
udc_txfifo_write(struct udc_ep *ep, struct usb_request *req)
{
	u8 *req_buf;
	u32 *buf;
	int i, j;
	unsigned bytes = 0;
	unsigned remaining = 0;

	if (!req || !ep)
		return;

	req_buf = req->buf + req->actual;
	prefetch(req_buf);
	remaining = req->length - req->actual;

	buf = (u32 *) req_buf;

	bytes = ep->ep.maxpacket;
	if (bytes > remaining)
		bytes = remaining;

	/* dwords first */
	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
		writel(*(buf + i), ep->txfifo);

	/* remaining bytes must be written by byte access */
	for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
		writeb((u8)(*(buf + i) >> (j << UDC_BITS_PER_BYTE_SHIFT)),
			ep->txfifo);
	}

	/* dummy write confirm */
	writel(0, &ep->regs->confirm);
}
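
/*
 * Worked example (illustrative): with maxpacket = 512 and 512 or more
 * bytes remaining, the loops above issue 512 / 4 = 128 dword writes and
 * no byte writes; a trailing short packet of 3 bytes instead does no
 * dword writes and three single-byte writes taken from successively
 * higher bits of *(buf + i).
 */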

/* Read dwords from RX fifo for OUT transfers */
static int udc_rxfifo_read_dwords(struct udc *dev, u32 *buf, int dwords)
{
	int i;

	VDBG(dev, "udc_read_dwords(): %d dwords\n", dwords);

	for (i = 0; i < dwords; i++)
		*(buf + i) = readl(dev->rxfifo);
	return 0;
}

/* Read bytes from RX fifo for OUT transfers */
static int udc_rxfifo_read_bytes(struct udc *dev, u8 *buf, int bytes)
{
	int i, j;
	u32 tmp;

	VDBG(dev, "udc_read_bytes(): %d bytes\n", bytes);

	/* dwords first */
	for (i = 0; i < bytes / UDC_DWORD_BYTES; i++)
		*((u32 *)(buf + (i << 2))) = readl(dev->rxfifo);

	/* remaining bytes must be read by byte access */
	if (bytes % UDC_DWORD_BYTES) {
		tmp = readl(dev->rxfifo);
		for (j = 0; j < bytes % UDC_DWORD_BYTES; j++) {
			*(buf + (i << 2) + j) = (u8)(tmp & UDC_BYTE_MASK);
			tmp = tmp >> UDC_BITS_PER_BYTE;
		}
	}

	return 0;
}

/* Read data from RX fifo for OUT transfers */
static int
udc_rxfifo_read(struct udc_ep *ep, struct udc_request *req)
{
	u8 *buf;
	unsigned buf_space;
	unsigned bytes = 0;
	unsigned finished = 0;

	/* received number of bytes */
	bytes = readl(&ep->regs->sts);
	bytes = AMD_GETBITS(bytes, UDC_EPSTS_RX_PKT_SIZE);

	buf_space = req->req.length - req->req.actual;
	buf = req->req.buf + req->req.actual;
	if (bytes > buf_space) {
		if ((buf_space % ep->ep.maxpacket) != 0) {
			DBG(ep->dev,
				"%s: rx %d bytes, rx-buf space = %d bytes\n",
				ep->ep.name, bytes, buf_space);
			req->req.status = -EOVERFLOW;
		}
		bytes = buf_space;
	}
	req->req.actual += bytes;

	/* last packet ? */
	if (((bytes % ep->ep.maxpacket) != 0) || (!bytes)
		|| ((req->req.actual == req->req.length) && !req->req.zero))
		finished = 1;

	/* read rx fifo bytes */
	VDBG(ep->dev, "ep %s: rxfifo read %d bytes\n", ep->ep.name, bytes);
	udc_rxfifo_read_bytes(ep->dev, buf, bytes);

	return finished;
}

/* create/re-init a DMA descriptor or a DMA descriptor chain */
static int prep_dma(struct udc_ep *ep, struct udc_request *req, gfp_t gfp)
{
	int retval = 0;
	u32 tmp;

	VDBG(ep->dev, "prep_dma\n");
	VDBG(ep->dev, "prep_dma ep%d req->td_data=%p\n",
			ep->num, req->td_data);

	/* set buffer pointer */
	req->td_data->bufptr = req->req.dma;

	/* set last bit */
	req->td_data->status |= AMD_BIT(UDC_DMA_IN_STS_L);

	/* build/re-init dma chain if maxpkt scatter mode, not for EP0 */
	if (use_dma_ppb) {

		retval = udc_create_dma_chain(ep, req, ep->ep.maxpacket, gfp);
		if (retval != 0) {
			if (retval == -ENOMEM)
				DBG(ep->dev, "Out of DMA memory\n");
			return retval;
		}
		if (ep->in) {
			if (req->req.length == ep->ep.maxpacket) {
				/* write tx bytes */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						ep->ep.maxpacket,
						UDC_DMA_IN_STS_TXBYTES);
			}
		}
	}

	if (ep->in) {
		VDBG(ep->dev, "IN: use_dma_ppb=%d req->req.len=%d "
				"maxpacket=%d ep%d\n",
				use_dma_ppb, req->req.length,
				ep->ep.maxpacket, ep->num);
		/*
		 * if bytes < max packet then tx bytes must
		 * be written in packet per buffer mode
		 */
		if (!use_dma_ppb || req->req.length < ep->ep.maxpacket
				|| ep->num == UDC_EP0OUT_IX
				|| ep->num == UDC_EP0IN_IX) {
			/* write tx bytes */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						req->req.length,
						UDC_DMA_IN_STS_TXBYTES);
			/* reset frame num */
			req->td_data->status =
				AMD_ADDBITS(req->td_data->status,
						0,
						UDC_DMA_IN_STS_FRAMENUM);
		}
		/* set HOST BUSY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_BUSY,
				UDC_DMA_STP_STS_BS);
	} else {
		VDBG(ep->dev, "OUT set host ready\n");
		/* set HOST READY */
		req->td_data->status =
			AMD_ADDBITS(req->td_data->status,
				UDC_DMA_STP_STS_BS_HOST_READY,
				UDC_DMA_STP_STS_BS);

		/* clear NAK by writing CNAK */
		if (ep->naking) {
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->naking = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}

	return retval;
}

/* Completes request packet ... caller MUST hold lock */
static void
complete_req(struct udc_ep *ep, struct udc_request *req, int sts)
__releases(ep->dev->lock)
__acquires(ep->dev->lock)
{
	struct udc *dev;
	unsigned halted;

	VDBG(ep->dev, "complete_req(): ep%d\n", ep->num);

	dev = ep->dev;
	/* unmap DMA */
	if (ep->dma)
		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->in);

	halted = ep->halted;
	ep->halted = 1;

	/* set new status if pending */
	if (req->req.status == -EINPROGRESS)
		req->req.status = sts;

	/* remove from ep queue */
	list_del_init(&req->queue);

	VDBG(ep->dev, "req %p => complete %d bytes at %s with sts %d\n",
		&req->req, req->req.length, ep->ep.name, sts);

	spin_unlock(&dev->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->halted = halted;
}

/* frees pci pool descriptors of a DMA chain */
static int udc_free_dma_chain(struct udc *dev, struct udc_request *req)
{
	int ret_val = 0;
	struct udc_data_dma *td;
	struct udc_data_dma *td_last = NULL;
	unsigned int i;

	DBG(dev, "free chain req = %p\n", req);

	/* do not free first desc., will be done by free for request */
	td_last = req->td_data;
	td = phys_to_virt(td_last->next);

	for (i = 1; i < req->chain_len; i++) {
		pci_pool_free(dev->data_requests, td,
				(dma_addr_t) td_last->next);
		td_last = td;
		td = phys_to_virt(td_last->next);
	}

	return ret_val;
}

/* Iterates to the end of a DMA chain and returns last descriptor */
static struct udc_data_dma *udc_get_last_dma_desc(struct udc_request *req)
{
	struct udc_data_dma *td;

	td = req->td_data;
	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L)))
		td = phys_to_virt(td->next);

	return td;
}

/* Iterates to the end of a DMA chain and counts bytes received */
static u32 udc_get_ppbdu_rxbytes(struct udc_request *req)
{
	struct udc_data_dma *td;
	u32 count;

	td = req->td_data;
	/* received number of bytes */
	count = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_RXBYTES);

	while (td && !(td->status & AMD_BIT(UDC_DMA_IN_STS_L))) {
		td = phys_to_virt(td->next);
		/* received number of bytes */
		if (td) {
			count += AMD_GETBITS(td->status,
				UDC_DMA_OUT_STS_RXBYTES);
		}
	}

	return count;
}

/* Creates or re-inits a DMA chain */
static int udc_create_dma_chain(
	struct udc_ep *ep,
	struct udc_request *req,
	unsigned long buf_len, gfp_t gfp_flags
)
{
	unsigned long bytes = req->req.length;
	unsigned int i;
	dma_addr_t dma_addr;
	struct udc_data_dma *td = NULL;
	struct udc_data_dma *last = NULL;
	unsigned long txbytes;
	unsigned create_new_chain = 0;
	unsigned len;

	VDBG(ep->dev, "udc_create_dma_chain: bytes=%ld buf_len=%ld\n",
			bytes, buf_len);
	dma_addr = DMA_DONT_USE;

	/* unset L bit in first desc for OUT */
	if (!ep->in)
		req->td_data->status &= AMD_CLEAR_BIT(UDC_DMA_IN_STS_L);

	/* alloc only new desc's if not already available */
	len = req->req.length / ep->ep.maxpacket;
	if (req->req.length % ep->ep.maxpacket)
		len++;

	if (len > req->chain_len) {
		/* shorter chain already allocated before */
		if (req->chain_len > 1)
			udc_free_dma_chain(ep->dev, req);
		req->chain_len = len;
		create_new_chain = 1;
	}

	td = req->td_data;
	/* gen. required number of descriptors and buffers */
	for (i = buf_len; i < bytes; i += buf_len) {
		/* create or determine next desc. */
		if (create_new_chain) {
			td = pci_pool_alloc(ep->dev->data_requests,
					gfp_flags, &dma_addr);
			if (!td)
				return -ENOMEM;

			td->status = 0;
		} else if (i == buf_len) {
			/* first td */
			td = (struct udc_data_dma *) phys_to_virt(
						req->td_data->next);
			td->status = 0;
		} else {
			td = (struct udc_data_dma *) phys_to_virt(last->next);
			td->status = 0;
		}

		if (td)
			td->bufptr = req->req.dma + i; /* assign buffer */
		else
			break;

		/* short packet ? */
		if ((bytes - i) >= buf_len) {
			txbytes = buf_len;
		} else {
			/* short packet */
			txbytes = bytes - i;
		}

		/* link td and assign tx bytes */
		if (i == buf_len) {
			if (create_new_chain)
				req->td_data->next = dma_addr;
			/*
			 * else
			 *	req->td_data->next = virt_to_phys(td);
			 */
			/* write tx bytes */
			if (ep->in) {
				/* first desc */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						ep->ep.maxpacket,
						UDC_DMA_IN_STS_TXBYTES);
				/* second desc */
				td->status = AMD_ADDBITS(td->status,
						txbytes,
						UDC_DMA_IN_STS_TXBYTES);
			}
		} else {
			if (create_new_chain)
				last->next = dma_addr;
			/*
			 * else
			 *	last->next = virt_to_phys(td);
			 */
			if (ep->in) {
				/* write tx bytes */
				td->status = AMD_ADDBITS(td->status,
						txbytes,
						UDC_DMA_IN_STS_TXBYTES);
			}
		}
		last = td;
	}
	/* set last bit */
	if (td) {
		td->status |= AMD_BIT(UDC_DMA_IN_STS_L);
		/* last desc. points to itself */
		req->td_data_last = td;
	}

	return 0;
}
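
/*
 * Resulting chain layout (illustrative sketch): for a request of length
 * 3 * buf_len, the loop above yields one descriptor per buf_len-sized
 * slice of req->req.dma:
 *
 *	td0 (req->td_data):  bufptr = dma,               next -> td1
 *	td1:                 bufptr = dma + buf_len,     next -> td2
 *	td2 (td_data_last):  bufptr = dma + 2 * buf_len, L bit set
 *
 * For IN eps each descriptor's TXBYTES field additionally carries the
 * slice length (buf_len, except for a trailing short packet).
 */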

/* Enabling RX DMA */
static void udc_set_rde(struct udc *dev)
{
	u32 tmp;

	VDBG(dev, "udc_set_rde()\n");
	/* stop RDE timer */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* set RDE */
	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_RDE);
	writel(tmp, &dev->regs->ctl);
}

/* Queues a request packet, called by gadget driver */
static int
udc_queue(struct usb_ep *usbep, struct usb_request *usbreq, gfp_t gfp)
{
	int retval = 0;
	u8 open_rxfifo = 0;
	unsigned long iflags;
	struct udc_ep *ep;
	struct udc_request *req;
	struct udc *dev;
	u32 tmp;

	/* check the inputs */
	req = container_of(usbreq, struct udc_request, req);

	if (!usbep || !usbreq || !usbreq->complete || !usbreq->buf
			|| !list_empty(&req->queue))
		return -EINVAL;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;

	VDBG(ep->dev, "udc_queue(): ep%d-in=%d\n", ep->num, ep->in);
	dev = ep->dev;

	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* map dma (usually done before) */
	if (ep->dma) {
		VDBG(dev, "DMA map req %p\n", req);
		retval = usb_gadget_map_request(&udc->gadget, usbreq, ep->in);
		if (retval)
			return retval;
	}

	VDBG(dev, "%s queue req %p, len %d req->td_data=%p buf %p\n",
			usbep->name, usbreq, usbreq->length,
			req->td_data, usbreq->buf);

	spin_lock_irqsave(&dev->lock, iflags);
	usbreq->actual = 0;
	usbreq->status = -EINPROGRESS;
	req->dma_done = 0;

	/* on empty queue just do first transfer */
	if (list_empty(&ep->queue)) {
		/* zlp */
		if (usbreq->length == 0) {
			/* IN zlp's are handled by hardware */
			complete_req(ep, req, 0);
			VDBG(dev, "%s: zlp\n", ep->ep.name);
			/*
			 * if set_config or set_intf is waiting for ack by zlp
			 * then set CSR_DONE
			 */
			if (dev->set_cfg_not_acked) {
				tmp = readl(&dev->regs->ctl);
				tmp |= AMD_BIT(UDC_DEVCTL_CSR_DONE);
				writel(tmp, &dev->regs->ctl);
				dev->set_cfg_not_acked = 0;
			}
			/* setup command is ACK'ed now by zlp */
			if (dev->waiting_zlp_ack_ep0in) {
				/* clear NAK by writing CNAK in EP0_IN */
				tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
				dev->ep[UDC_EP0IN_IX].naking = 0;
				UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX],
						UDC_EP0IN_IX);
				dev->waiting_zlp_ack_ep0in = 0;
			}
			goto finished;
		}
		if (ep->dma) {
			retval = prep_dma(ep, req, GFP_ATOMIC);
			if (retval != 0)
				goto finished;
			/* write desc pointer to enable DMA */
			if (ep->in) {
				/* set HOST READY */
				req->td_data->status =
					AMD_ADDBITS(req->td_data->status,
						UDC_DMA_IN_STS_BS_HOST_READY,
						UDC_DMA_IN_STS_BS);
			}

			/* disable rx dma while the descriptor is updated */
			if (!ep->in) {
				/* stop RDE timer */
				if (timer_pending(&udc_timer)) {
					set_rde = 0;
					mod_timer(&udc_timer, jiffies - 1);
				}
				/* clear RDE */
				tmp = readl(&dev->regs->ctl);
				tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
				writel(tmp, &dev->regs->ctl);
				open_rxfifo = 1;

				/*
				 * if BNA occurred then let BNA dummy desc.
				 * point to current desc.
				 */
				if (ep->bna_occurred) {
					VDBG(dev, "copy to BNA dummy desc.\n");
					memcpy(ep->bna_dummy_req->td_data,
						req->td_data,
						sizeof(struct udc_data_dma));
				}
			}
			/* write desc pointer */
			writel(req->td_phys, &ep->regs->desptr);

			/* clear NAK by writing CNAK */
			if (ep->naking) {
				tmp = readl(&ep->regs->ctl);
				tmp |= AMD_BIT(UDC_EPCTL_CNAK);
				writel(tmp, &ep->regs->ctl);
				ep->naking = 0;
				UDC_QUEUE_CNAK(ep, ep->num);
			}

			if (ep->in) {
				/* enable ep irq */
				tmp = readl(&dev->regs->ep_irqmsk);
				tmp &= AMD_UNMASK_BIT(ep->num);
				writel(tmp, &dev->regs->ep_irqmsk);
			}
		} else if (ep->in) {
			/* enable ep irq */
			tmp = readl(&dev->regs->ep_irqmsk);
			tmp &= AMD_UNMASK_BIT(ep->num);
			writel(tmp, &dev->regs->ep_irqmsk);
		}

	} else if (ep->dma) {

		/*
		 * prep_dma not used for OUT ep's, this is not possible
		 * for PPB modes, because of chain creation reasons
		 */
		if (ep->in) {
			retval = prep_dma(ep, req, GFP_ATOMIC);
			if (retval != 0)
				goto finished;
		}
	}
	VDBG(dev, "list_add\n");
	/* add request to ep queue */
	if (req) {

		list_add_tail(&req->queue, &ep->queue);

		/* open rxfifo if out data queued */
		if (open_rxfifo) {
			/* enable DMA */
			req->dma_going = 1;
			udc_set_rde(dev);
			if (ep->num != UDC_EP0OUT_IX)
				dev->data_ep_queued = 1;
		}
		/* stop OUT naking */
		if (!ep->in) {
			if (!use_dma && udc_rxfifo_pending) {
				DBG(dev, "udc_queue(): pending bytes in "
					"rxfifo after nyet\n");
				/*
				 * read pending bytes after nyet:
				 * referring to isr
				 */
				if (udc_rxfifo_read(ep, req)) {
					/* finish */
					complete_req(ep, req, 0);
				}
				udc_rxfifo_pending = 0;
			}
		}
	}

finished:
	spin_unlock_irqrestore(&dev->lock, iflags);
	return retval;
}
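
/*
 * Caller-side sketch (illustrative, using the generic gadget API rather
 * than anything defined in this file): a gadget driver reaches
 * udc_queue() through usb_ep_queue() on an enabled endpoint, roughly:
 *
 *	struct usb_request *req = usb_ep_alloc_request(ep, GFP_KERNEL);
 *
 *	req->buf = buf;
 *	req->length = len;
 *	req->complete = my_complete;	(invoked via complete_req())
 *	ret = usb_ep_queue(ep, req, GFP_ATOMIC);
 */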

/* Empty request queue of an endpoint; caller holds spinlock */
static void empty_req_queue(struct udc_ep *ep)
{
	struct udc_request *req;

	ep->halted = 1;
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
			struct udc_request,
			queue);
		complete_req(ep, req, -ESHUTDOWN);
	}
}

/* Dequeues a request packet, called by gadget driver */
static int udc_dequeue(struct usb_ep *usbep, struct usb_request *usbreq)
{
	struct udc_ep *ep;
	struct udc_request *req;
	unsigned halted;
	unsigned long iflags;

	ep = container_of(usbep, struct udc_ep, ep);
	if (!usbep || !usbreq || (!ep->ep.desc && (ep->num != 0
				&& ep->num != UDC_EP0OUT_IX)))
		return -EINVAL;

	req = container_of(usbreq, struct udc_request, req);

	spin_lock_irqsave(&ep->dev->lock, iflags);
	halted = ep->halted;
	ep->halted = 1;
	/* request in processing or next one */
	if (ep->queue.next == &req->queue) {
		if (ep->dma && req->dma_going) {
			if (ep->in)
				ep->cancel_transfer = 1;
			else {
				u32 tmp;
				u32 dma_sts;
				/* stop potential receive DMA */
				tmp = readl(&udc->regs->ctl);
				writel(tmp & AMD_UNMASK_BIT(UDC_DEVCTL_RDE),
							&udc->regs->ctl);
				/*
				 * Cancel transfer later in ISR
				 * if descriptor was touched.
				 */
				dma_sts = AMD_GETBITS(req->td_data->status,
							UDC_DMA_OUT_STS_BS);
				if (dma_sts != UDC_DMA_OUT_STS_BS_HOST_READY)
					ep->cancel_transfer = 1;
				else {
					udc_init_bna_dummy(ep->req);
					writel(ep->bna_dummy_req->td_phys,
						&ep->regs->desptr);
				}
				writel(tmp, &udc->regs->ctl);
			}
		}
	}
	complete_req(ep, req, -ECONNRESET);
	ep->halted = halted;

	spin_unlock_irqrestore(&ep->dev->lock, iflags);
	return 0;
}

/* Halt or clear halt of endpoint */
static int
udc_set_halt(struct usb_ep *usbep, int halt)
{
	struct udc_ep *ep;
	u32 tmp;
	unsigned long iflags;
	int retval = 0;

	if (!usbep)
		return -EINVAL;

	pr_debug("set_halt %s: halt=%d\n", usbep->name, halt);

	ep = container_of(usbep, struct udc_ep, ep);
	if (!ep->ep.desc && (ep->num != 0 && ep->num != UDC_EP0OUT_IX))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	spin_lock_irqsave(&udc_stall_spinlock, iflags);
	/* halt or clear halt */
	if (halt) {
		if (ep->num == 0)
			ep->dev->stall_ep0in = 1;
		else {
			/*
			 * set STALL
			 * rxfifo empty not taken into account
			 */
			tmp = readl(&ep->regs->ctl);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 1;

			/* setup poll timer */
			if (!timer_pending(&udc_pollstall_timer)) {
				udc_pollstall_timer.expires = jiffies +
					HZ * UDC_POLLSTALL_TIMER_USECONDS
					/ (1000 * 1000);
				if (!stop_pollstall_timer) {
					DBG(ep->dev, "start polltimer\n");
					add_timer(&udc_pollstall_timer);
				}
			}
		}
	} else {
		/* ep is halted by set_halt() before */
		if (ep->halted) {
			tmp = readl(&ep->regs->ctl);
			/* clear stall bit */
			tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
			/* clear NAK by writing CNAK */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}
	spin_unlock_irqrestore(&udc_stall_spinlock, iflags);
	return retval;
}

/* gadget interface */
static const struct usb_ep_ops udc_ep_ops = {
	.enable		= udc_ep_enable,
	.disable	= udc_ep_disable,

	.alloc_request	= udc_alloc_request,
	.free_request	= udc_free_request,

	.queue		= udc_queue,
	.dequeue	= udc_dequeue,

	.set_halt	= udc_set_halt,
	/* fifo ops not implemented */
};

/*-------------------------------------------------------------------------*/

/* Get frame counter (not implemented) */
static int udc_get_frame(struct usb_gadget *gadget)
{
	return -EOPNOTSUPP;
}

/* Initiates a remote wakeup */
static int udc_remote_wakeup(struct udc *dev)
{
	unsigned long flags;
	u32 tmp;

	DBG(dev, "UDC initiates remote wakeup\n");

	spin_lock_irqsave(&dev->lock, flags);

	tmp = readl(&dev->regs->ctl);
	tmp |= AMD_BIT(UDC_DEVCTL_RES);
	writel(tmp, &dev->regs->ctl);
	tmp &= AMD_CLEAR_BIT(UDC_DEVCTL_RES);
	writel(tmp, &dev->regs->ctl);

	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}

/* Remote wakeup gadget interface */
static int udc_wakeup(struct usb_gadget *gadget)
{
	struct udc *dev;

	if (!gadget)
		return -EINVAL;
	dev = container_of(gadget, struct udc, gadget);
	udc_remote_wakeup(dev);

	return 0;
}

static int amd5536_udc_start(struct usb_gadget *g,
		struct usb_gadget_driver *driver);
static int amd5536_udc_stop(struct usb_gadget *g);

static const struct usb_gadget_ops udc_ops = {
	.wakeup		= udc_wakeup,
	.get_frame	= udc_get_frame,
	.udc_start	= amd5536_udc_start,
	.udc_stop	= amd5536_udc_stop,
};

/* Setups endpoint parameters, adds endpoints to linked list */
static void make_ep_lists(struct udc *dev)
{
	/* make gadget ep lists */
	INIT_LIST_HEAD(&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_STATUS_IX].ep.ep_list,
						&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPIN_IX].ep.ep_list,
						&dev->gadget.ep_list);
	list_add_tail(&dev->ep[UDC_EPOUT_IX].ep.ep_list,
						&dev->gadget.ep_list);

	/* fifo config */
	dev->ep[UDC_EPIN_STATUS_IX].fifo_depth = UDC_EPIN_SMALLINT_BUFF_SIZE;
	if (dev->gadget.speed == USB_SPEED_FULL)
		dev->ep[UDC_EPIN_IX].fifo_depth = UDC_FS_EPIN_BUFF_SIZE;
	else if (dev->gadget.speed == USB_SPEED_HIGH)
		dev->ep[UDC_EPIN_IX].fifo_depth = hs_tx_buf;
	dev->ep[UDC_EPOUT_IX].fifo_depth = UDC_RXFIFO_SIZE;
}

/* init registers at driver load time */
static int startup_registers(struct udc *dev)
{
	u32 tmp;

	/* init controller by soft reset */
	udc_soft_reset(dev);

	/* mask not needed interrupts */
	udc_mask_unused_interrupts(dev);

	/* put into initial config */
	udc_basic_init(dev);
	/* link up all endpoints */
	udc_setup_endpoints(dev);

	/* program speed */
	tmp = readl(&dev->regs->cfg);
	if (use_fullspeed)
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
	else
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_HS, UDC_DEVCFG_SPD);
	writel(tmp, &dev->regs->cfg);

	return 0;
}

/* Inits UDC context */
static void udc_basic_init(struct udc *dev)
{
	u32 tmp;

	DBG(dev, "udc_basic_init()\n");

	dev->gadget.speed = USB_SPEED_UNKNOWN;

	/* stop RDE timer */
	if (timer_pending(&udc_timer)) {
		set_rde = 0;
		mod_timer(&udc_timer, jiffies - 1);
	}
	/* stop poll stall timer */
	if (timer_pending(&udc_pollstall_timer))
		mod_timer(&udc_pollstall_timer, jiffies - 1);
	/* disable DMA */
	tmp = readl(&dev->regs->ctl);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_RDE);
	tmp &= AMD_UNMASK_BIT(UDC_DEVCTL_TDE);
	writel(tmp, &dev->regs->ctl);

	/* enable dynamic CSR programming */
	tmp = readl(&dev->regs->cfg);
	tmp |= AMD_BIT(UDC_DEVCFG_CSR_PRG);
	/* set self powered */
	tmp |= AMD_BIT(UDC_DEVCFG_SP);
	/* set remote wakeupable */
	tmp |= AMD_BIT(UDC_DEVCFG_RWKP);
	writel(tmp, &dev->regs->cfg);

	make_ep_lists(dev);

	dev->data_ep_enabled = 0;
	dev->data_ep_queued = 0;
}

/* Sets initial endpoint parameters */
static void udc_setup_endpoints(struct udc *dev)
{
	struct udc_ep *ep;
	u32 tmp;
	u32 reg;

	DBG(dev, "udc_setup_endpoints()\n");

	/* read enum speed */
	tmp = readl(&dev->regs->sts);
	tmp = AMD_GETBITS(tmp, UDC_DEVSTS_ENUM_SPEED);
	if (tmp == UDC_DEVSTS_ENUM_SPEED_HIGH)
		dev->gadget.speed = USB_SPEED_HIGH;
	else if (tmp == UDC_DEVSTS_ENUM_SPEED_FULL)
		dev->gadget.speed = USB_SPEED_FULL;

	/* set basic ep parameters */
	for (tmp = 0; tmp < UDC_EP_NUM; tmp++) {
		ep = &dev->ep[tmp];
		ep->dev = dev;
		ep->ep.name = ep_info[tmp].name;
		ep->ep.caps = ep_info[tmp].caps;
		ep->num = tmp;
		/* txfifo size is calculated at enable time */
		ep->txfifo = dev->txfifo;

		/* fifo size */
		if (tmp < UDC_EPIN_NUM) {
			ep->fifo_depth = UDC_TXFIFO_SIZE;
			ep->in = 1;
		} else {
			ep->fifo_depth = UDC_RXFIFO_SIZE;
			ep->in = 0;
		}
		ep->regs = &dev->ep_regs[tmp];
		/*
		 * ep will be reset only if ep was not enabled before to avoid
		 * disabling ep interrupts when ENUM interrupt occurs but ep is
		 * not enabled by gadget driver
		 */
		if (!ep->ep.desc)
			ep_init(dev->regs, ep);

		if (use_dma) {
			/*
			 * ep->dma is not really used, just to indicate that
			 * DMA is active: remove this
			 * dma regs = dev control regs
			 */
			ep->dma = &dev->regs->ctl;

			/* nak OUT endpoints until enable - not for ep0 */
			if (tmp != UDC_EP0IN_IX && tmp != UDC_EP0OUT_IX
					&& tmp > UDC_EPIN_NUM) {
				/* set NAK */
				reg = readl(&dev->ep[tmp].regs->ctl);
				reg |= AMD_BIT(UDC_EPCTL_SNAK);
				writel(reg, &dev->ep[tmp].regs->ctl);
				dev->ep[tmp].naking = 1;
			}
		}
	}
	/* EP0 max packet */
	if (dev->gadget.speed == USB_SPEED_FULL) {
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
					   UDC_FS_EP0IN_MAX_PKT_SIZE);
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
					   UDC_FS_EP0OUT_MAX_PKT_SIZE);
	} else if (dev->gadget.speed == USB_SPEED_HIGH) {
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0IN_IX].ep,
					   UDC_EP0IN_MAX_PKT_SIZE);
		usb_ep_set_maxpacket_limit(&dev->ep[UDC_EP0OUT_IX].ep,
					   UDC_EP0OUT_MAX_PKT_SIZE);
	}

	/*
	 * with suspend bug workaround, ep0 params for gadget driver
	 * are set at gadget driver bind() call
	 */
	dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
	dev->ep[UDC_EP0IN_IX].halted = 0;
	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);

	/* init cfg/alt/int */
	dev->cur_config = 0;
	dev->cur_intf = 0;
	dev->cur_alt = 0;
}

/* Bringup after Connect event, initial bringup to be ready for ep0 events */
static void usb_connect(struct udc *dev)
{
	dev_info(&dev->pdev->dev, "USB Connect\n");

	dev->connected = 1;

	/* put into initial config */
	udc_basic_init(dev);

	/* enable device setup interrupts */
	udc_enable_dev_setup_interrupts(dev);
}

/*
 * Calls gadget with disconnect event and resets the UDC and makes
 * initial bringup to be ready for ep0 events
 */
static void usb_disconnect(struct udc *dev)
{
	dev_info(&dev->pdev->dev, "USB Disconnect\n");

	dev->connected = 0;

	/* mask interrupts */
	udc_mask_unused_interrupts(dev);

	/* REVISIT there doesn't seem to be a point to having this
	 * talk to a tasklet ... do it directly, we already hold
	 * the spinlock needed to process the disconnect.
	 */

	tasklet_schedule(&disconnect_tasklet);
}

/* Tasklet for disconnect to be outside of interrupt context */
static void udc_tasklet_disconnect(unsigned long par)
{
	struct udc *dev = (struct udc *)(*((struct udc **) par));
	u32 tmp;

	DBG(dev, "Tasklet disconnect\n");
	spin_lock_irq(&dev->lock);

	if (dev->driver) {
		spin_unlock(&dev->lock);
		dev->driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);

		/* empty queues */
		for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
			empty_req_queue(&dev->ep[tmp]);
	}

	/* disable ep0 */
	ep_init(dev->regs,
			&dev->ep[UDC_EP0IN_IX]);

	if (!soft_reset_occured) {
		/* init controller by soft reset */
		udc_soft_reset(dev);
		soft_reset_occured++;
	}

	/* re-enable dev interrupts */
	udc_enable_dev_setup_interrupts(dev);
	/* back to full speed ? */
	if (use_fullspeed) {
		tmp = readl(&dev->regs->cfg);
		tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
		writel(tmp, &dev->regs->cfg);
	}

	spin_unlock_irq(&dev->lock);
}

/* Reset the UDC core */
static void udc_soft_reset(struct udc *dev)
{
	unsigned long flags;

	DBG(dev, "Soft reset\n");
	/*
	 * reset possible waiting interrupts, because int.
	 * status is lost after soft reset,
	 * ep int. status reset
	 */
	writel(UDC_EPINT_MSK_DISABLE_ALL, &dev->regs->ep_irqsts);
	/* device int. status reset */
	writel(UDC_DEV_MSK_DISABLE, &dev->regs->irqsts);

	spin_lock_irqsave(&udc_irq_spinlock, flags);
	writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
	readl(&dev->regs->cfg);
	spin_unlock_irqrestore(&udc_irq_spinlock, flags);
}

/* RDE timer callback to set RDE bit */
static void udc_timer_function(unsigned long v)
{
	u32 tmp;

	spin_lock_irq(&udc_irq_spinlock);

	if (set_rde > 0) {
		/*
		 * open the fifo if fifo was filled on last timer call
		 * conditionally
		 */
		if (set_rde > 1) {
			/* set RDE to receive setup data */
			tmp = readl(&udc->regs->ctl);
			tmp |= AMD_BIT(UDC_DEVCTL_RDE);
			writel(tmp, &udc->regs->ctl);
			set_rde = -1;
		} else if (readl(&udc->regs->sts)
				& AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY)) {
			/*
			 * if fifo empty setup polling, do not just
			 * open the fifo
			 */
			udc_timer.expires = jiffies + HZ/UDC_RDE_TIMER_DIV;
			if (!stop_timer)
				add_timer(&udc_timer);
		} else {
			/*
			 * fifo contains data now, setup timer for opening
			 * the fifo when timer expires to be able to receive
			 * setup packets; when data packets get queued by
			 * the gadget layer then the timer will be forced to
			 * expire with set_rde=0 (RDE is set in udc_queue())
			 */
			set_rde++;
			/* debug: lhadmot_timer_start = 221070 */
			udc_timer.expires = jiffies + HZ*UDC_RDE_TIMER_SECONDS;
			if (!stop_timer)
				add_timer(&udc_timer);
		}

	} else
		set_rde = -1; /* RDE was set by udc_queue() */
	spin_unlock_irq(&udc_irq_spinlock);
	if (stop_timer)
		complete(&on_exit);
}
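
/*
 * Summary of the RDE timer interplay (derived from the code above and
 * udc_queue()/udc_set_rde()): udc_queue() clears RDE while an OUT
 * descriptor is updated and then re-enables it via udc_set_rde(), which
 * also forces a pending timer to expire with set_rde = 0. When the timer
 * itself runs with set_rde > 0: an empty rx fifo just re-arms a short
 * poll; a non-empty fifo bumps set_rde to 2 and re-arms, so the next
 * expiry sets RDE; set_rde < 0 means no one is waiting on the timer.
 */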

/* Handle halt state, used in stall poll timer */
static void udc_handle_halt_state(struct udc_ep *ep)
{
	u32 tmp;
	/* while ep is halted, keep STALL set */
	if (ep->halted == 1) {
		tmp = readl(&ep->regs->ctl);
		/* STALL cleared ? */
		if (!(tmp & AMD_BIT(UDC_EPCTL_S))) {
			/*
			 * FIXME: MSC spec requires that stall remains
			 * even on receiving of CLEAR_FEATURE HALT. So
			 * we would set STALL again here to be compliant.
			 * But with current mass storage drivers this does
			 * not work (would produce endless host retries).
			 * So we clear halt on CLEAR_FEATURE.
			 *
			DBG(ep->dev, "ep %d: set STALL again\n", ep->num);
			tmp |= AMD_BIT(UDC_EPCTL_S);
			writel(tmp, &ep->regs->ctl);*/

			/* clear NAK by writing CNAK */
			tmp |= AMD_BIT(UDC_EPCTL_CNAK);
			writel(tmp, &ep->regs->ctl);
			ep->halted = 0;
			UDC_QUEUE_CNAK(ep, ep->num);
		}
	}
}

/* Stall timer callback to poll S bit and set it again after */
static void udc_pollstall_timer_function(unsigned long v)
{
	struct udc_ep *ep;
	int halted = 0;

	spin_lock_irq(&udc_stall_spinlock);
	/*
	 * only one IN and OUT endpoints are handled
	 * IN poll stall
	 */
	ep = &udc->ep[UDC_EPIN_IX];
	udc_handle_halt_state(ep);
	if (ep->halted)
		halted = 1;
	/* OUT poll stall */
	ep = &udc->ep[UDC_EPOUT_IX];
	udc_handle_halt_state(ep);
	if (ep->halted)
		halted = 1;

	/* setup timer again when still halted */
	if (!stop_pollstall_timer && halted) {
		udc_pollstall_timer.expires = jiffies +
					HZ * UDC_POLLSTALL_TIMER_USECONDS
					/ (1000 * 1000);
		add_timer(&udc_pollstall_timer);
	}
	spin_unlock_irq(&udc_stall_spinlock);

	if (stop_pollstall_timer)
		complete(&on_pollstall_exit);
}
1891
1892/* Inits endpoint 0 so that SETUP packets are processed */
1893static void activate_control_endpoints(struct udc *dev)
1894{
1895 u32 tmp;
1896
1897 DBG(dev, "activate_control_endpoints\n");
1898
1899 /* flush fifo */
1900 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
1901 tmp |= AMD_BIT(UDC_EPCTL_F);
1902 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
1903
1904 /* set ep0 directions */
1905 dev->ep[UDC_EP0IN_IX].in = 1;
1906 dev->ep[UDC_EP0OUT_IX].in = 0;
1907
1908 /* set buffer size (tx fifo entries) of EP0_IN */
1909 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
1910 if (dev->gadget.speed == USB_SPEED_FULL)
1911 tmp = AMD_ADDBITS(tmp, UDC_FS_EPIN0_BUFF_SIZE,
1912 UDC_EPIN_BUFF_SIZE);
1913 else if (dev->gadget.speed == USB_SPEED_HIGH)
1914 tmp = AMD_ADDBITS(tmp, UDC_EPIN0_BUFF_SIZE,
1915 UDC_EPIN_BUFF_SIZE);
1916 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufin_framenum);
1917
1918 /* set max packet size of EP0_IN */
1919 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
1920 if (dev->gadget.speed == USB_SPEED_FULL)
1921 tmp = AMD_ADDBITS(tmp, UDC_FS_EP0IN_MAX_PKT_SIZE,
1922 UDC_EP_MAX_PKT_SIZE);
1923 else if (dev->gadget.speed == USB_SPEED_HIGH)
1924 tmp = AMD_ADDBITS(tmp, UDC_EP0IN_MAX_PKT_SIZE,
1925 UDC_EP_MAX_PKT_SIZE);
1926 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->bufout_maxpkt);
1927
1928 /* set max packet size of EP0_OUT */
1929 tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
1930 if (dev->gadget.speed == USB_SPEED_FULL)
1931 tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
1932 UDC_EP_MAX_PKT_SIZE);
1933 else if (dev->gadget.speed == USB_SPEED_HIGH)
1934 tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
1935 UDC_EP_MAX_PKT_SIZE);
1936 writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->bufout_maxpkt);
1937
1938 /* set max packet size of EP0 in UDC CSR */
1939 tmp = readl(&dev->csr->ne[0]);
1940 if (dev->gadget.speed == USB_SPEED_FULL)
1941 tmp = AMD_ADDBITS(tmp, UDC_FS_EP0OUT_MAX_PKT_SIZE,
1942 UDC_CSR_NE_MAX_PKT);
1943 else if (dev->gadget.speed == USB_SPEED_HIGH)
1944 tmp = AMD_ADDBITS(tmp, UDC_EP0OUT_MAX_PKT_SIZE,
1945 UDC_CSR_NE_MAX_PKT);
1946 writel(tmp, &dev->csr->ne[0]);
1947
1948 if (use_dma) {
1949 dev->ep[UDC_EP0OUT_IX].td->status |=
1950 AMD_BIT(UDC_DMA_OUT_STS_L);
1951 /* write dma desc address */
1952 writel(dev->ep[UDC_EP0OUT_IX].td_stp_dma,
1953 &dev->ep[UDC_EP0OUT_IX].regs->subptr);
1954 writel(dev->ep[UDC_EP0OUT_IX].td_phys,
1955 &dev->ep[UDC_EP0OUT_IX].regs->desptr);
1956 /* stop RDE timer */
1957 if (timer_pending(&udc_timer)) {
1958 set_rde = 0;
1959 mod_timer(&udc_timer, jiffies - 1);
1960 }
1961 /* stop pollstall timer */
1962 if (timer_pending(&udc_pollstall_timer))
1963 mod_timer(&udc_pollstall_timer, jiffies - 1);
1964 /* enable DMA */
1965 tmp = readl(&dev->regs->ctl);
1966 tmp |= AMD_BIT(UDC_DEVCTL_MODE)
1967 | AMD_BIT(UDC_DEVCTL_RDE)
1968 | AMD_BIT(UDC_DEVCTL_TDE);
1969 if (use_dma_bufferfill_mode)
1970 tmp |= AMD_BIT(UDC_DEVCTL_BF);
1971 else if (use_dma_ppb_du)
1972 tmp |= AMD_BIT(UDC_DEVCTL_DU);
1973 writel(tmp, &dev->regs->ctl);
1974 }
1975
1976 /* clear NAK by writing CNAK for EP0IN */
1977 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
1978 tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1979 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
1980 dev->ep[UDC_EP0IN_IX].naking = 0;
1981 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
1982
1983 /* clear NAK by writing CNAK for EP0OUT */
1984 tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
1985 tmp |= AMD_BIT(UDC_EPCTL_CNAK);
1986 writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
1987 dev->ep[UDC_EP0OUT_IX].naking = 0;
1988 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
1989}
1990
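/*
 * Illustration, not part of the driver: the register writes above all
 * follow a read-modify-write scheme. Assuming AMD_GETBITS()/AMD_ADDBITS()
 * are mask-and-shift field helpers, a generic equivalent looks like this
 * sketch (field_get/field_put are hypothetical names):
 */
static inline u32 field_get(u32 reg, u32 mask, unsigned int shift)
{
	return (reg & mask) >> shift;
}

static inline u32 field_put(u32 reg, u32 val, u32 mask, unsigned int shift)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

/*
 * usage sketch against a hypothetical max-packet field:
 *
 *	tmp = readl(&ep->regs->bufout_maxpkt);
 *	tmp = field_put(tmp, new_maxpkt, MAXPKT_MASK, MAXPKT_SHIFT);
 *	writel(tmp, &ep->regs->bufout_maxpkt);
 */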
1991/* Make endpoint 0 ready for control traffic */
1992static int setup_ep0(struct udc *dev)
1993{
1994 activate_control_endpoints(dev);
1995 /* enable ep0 interrupts */
1996 udc_enable_ep0_interrupts(dev);
1997 /* enable device setup interrupts */
1998 udc_enable_dev_setup_interrupts(dev);
1999
2000 return 0;
2001}
2002
2003/* Called by gadget driver to register itself */
2004static int amd5536_udc_start(struct usb_gadget *g,
2005 struct usb_gadget_driver *driver)
2006{
2007 struct udc *dev = to_amd5536_udc(g);
2008 u32 tmp;
2009
2010 driver->driver.bus = NULL;
2011 dev->driver = driver;
2012
2013 /* Some gadget drivers use both ep0 directions.
2014 * NOTE: to gadget driver, ep0 is just one endpoint...
2015 */
2016 dev->ep[UDC_EP0OUT_IX].ep.driver_data =
2017 dev->ep[UDC_EP0IN_IX].ep.driver_data;
2018
2019 /* get ready for ep0 traffic */
2020 setup_ep0(dev);
2021
2022 /* clear SD */
2023 tmp = readl(&dev->regs->ctl);
2024 tmp = tmp & AMD_CLEAR_BIT(UDC_DEVCTL_SD);
2025 writel(tmp, &dev->regs->ctl);
2026
2027 usb_connect(dev);
2028
2029 return 0;
2030}
2031
2032/* shutdown requests and disconnect from gadget */
2033static void
2034shutdown(struct udc *dev, struct usb_gadget_driver *driver)
2035__releases(dev->lock)
2036__acquires(dev->lock)
2037{
2038 int tmp;
2039
2040 /* empty queues and init hardware */
2041 udc_basic_init(dev);
2042
2043 for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
2044 empty_req_queue(&dev->ep[tmp]);
2045
2046 udc_setup_endpoints(dev);
2047}
2048
2049/* Called by gadget driver to unregister itself */
2050static int amd5536_udc_stop(struct usb_gadget *g)
2051{
2052 struct udc *dev = to_amd5536_udc(g);
2053 unsigned long flags;
2054 u32 tmp;
2055
2056 spin_lock_irqsave(&dev->lock, flags);
2057 udc_mask_unused_interrupts(dev);
2058 shutdown(dev, NULL);
2059 spin_unlock_irqrestore(&dev->lock, flags);
2060
2061 dev->driver = NULL;
2062
2063 /* set SD */
2064 tmp = readl(&dev->regs->ctl);
2065 tmp |= AMD_BIT(UDC_DEVCTL_SD);
2066 writel(tmp, &dev->regs->ctl);
2067
2068 return 0;
2069}
2070
2071/* Clear pending NAK bits */
2072static void udc_process_cnak_queue(struct udc *dev)
2073{
2074 u32 tmp;
2075 u32 reg;
2076
2077 /* check epin's */
2078 DBG(dev, "CNAK pending queue processing\n");
2079 for (tmp = 0; tmp < UDC_EPIN_NUM_USED; tmp++) {
2080 if (cnak_pending & (1 << tmp)) {
2081 DBG(dev, "CNAK pending for ep%d\n", tmp);
2082 /* clear NAK by writing CNAK */
2083 reg = readl(&dev->ep[tmp].regs->ctl);
2084 reg |= AMD_BIT(UDC_EPCTL_CNAK);
2085 writel(reg, &dev->ep[tmp].regs->ctl);
2086 dev->ep[tmp].naking = 0;
2087 UDC_QUEUE_CNAK(&dev->ep[tmp], dev->ep[tmp].num);
2088 }
2089 }
2090 /* ... and ep0out */
2091 if (cnak_pending & (1 << UDC_EP0OUT_IX)) {
2092 DBG(dev, "CNAK pending for ep%d\n", UDC_EP0OUT_IX);
2093 /* clear NAK by writing CNAK */
2094 reg = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
2095 reg |= AMD_BIT(UDC_EPCTL_CNAK);
2096 writel(reg, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
2097 dev->ep[UDC_EP0OUT_IX].naking = 0;
2098 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX],
2099 dev->ep[UDC_EP0OUT_IX].num);
2100 }
2101}
2102
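/*
 * Illustration, not part of the driver: udc_process_cnak_queue() drains
 * per-endpoint work that was deferred into the cnak_pending bitmask. A
 * minimal sketch of that defer/drain scheme (hypothetical names; the
 * locking that guards concurrent marking is omitted):
 */
static u32 pending_mask;

static void defer_ep_work(unsigned int ep_idx)
{
	pending_mask |= 1U << ep_idx;	/* remember this endpoint */
}

static void drain_ep_work(void (*handler)(unsigned int))
{
	while (pending_mask) {
		unsigned int ep_idx = __ffs(pending_mask); /* lowest set bit */

		pending_mask &= ~(1U << ep_idx);
		handler(ep_idx);
	}
}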
2103/* Enabling RX DMA after setup packet */
2104static void udc_ep0_set_rde(struct udc *dev)
2105{
2106 if (use_dma) {
2107 /*
2108 * only enable RXDMA when no data endpoint enabled
2109 * or data is queued
2110 */
2111 if (!dev->data_ep_enabled || dev->data_ep_queued) {
2112 udc_set_rde(dev);
2113 } else {
2114 /*
2115 * set up timer for enabling RDE (so that RXFIFO DMA
2116 * is not enabled for data endpoints too early)
2117 */
2118 if (set_rde != 0 && !timer_pending(&udc_timer)) {
2119 udc_timer.expires =
2120 jiffies + HZ/UDC_RDE_TIMER_DIV;
2121 set_rde = 1;
2122 if (!stop_timer)
2123 add_timer(&udc_timer);
2124 }
2125 }
2126 }
2127}
2128
2129
2130/* Interrupt handler for data OUT traffic */
2131static irqreturn_t udc_data_out_isr(struct udc *dev, int ep_ix)
2132{
2133 irqreturn_t ret_val = IRQ_NONE;
2134 u32 tmp;
2135 struct udc_ep *ep;
2136 struct udc_request *req;
2137 unsigned int count;
2138 struct udc_data_dma *td = NULL;
2139 unsigned dma_done;
2140
2141 VDBG(dev, "ep%d irq\n", ep_ix);
2142 ep = &dev->ep[ep_ix];
2143
2144 tmp = readl(&ep->regs->sts);
2145 if (use_dma) {
2146 /* BNA event ? */
2147 if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
2148 DBG(dev, "BNA ep%dout occurred - DESPTR = %x\n",
2149 ep->num, readl(&ep->regs->desptr));
2150 /* clear BNA */
2151 writel(tmp | AMD_BIT(UDC_EPSTS_BNA), &ep->regs->sts);
2152 if (!ep->cancel_transfer)
2153 ep->bna_occurred = 1;
2154 else
2155 ep->cancel_transfer = 0;
2156 ret_val = IRQ_HANDLED;
2157 goto finished;
2158 }
2159 }
2160 /* HE event ? */
2161 if (tmp & AMD_BIT(UDC_EPSTS_HE)) {
2162 dev_err(&dev->pdev->dev, "HE ep%dout occurred\n", ep->num);
2163
2164 /* clear HE */
2165 writel(tmp | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
2166 ret_val = IRQ_HANDLED;
2167 goto finished;
2168 }
2169
2170 if (!list_empty(&ep->queue)) {
2171
2172 /* next request */
2173 req = list_entry(ep->queue.next,
2174 struct udc_request, queue);
2175 } else {
2176 req = NULL;
2177 udc_rxfifo_pending = 1;
2178 }
2179 VDBG(dev, "req = %p\n", req);
2180 /* fifo mode */
2181 if (!use_dma) {
2182
2183 /* read fifo */
2184 if (req && udc_rxfifo_read(ep, req)) {
2185 ret_val = IRQ_HANDLED;
2186
2187 /* finish */
2188 complete_req(ep, req, 0);
2189 /* next request */
2190 if (!list_empty(&ep->queue) && !ep->halted) {
2191 req = list_entry(ep->queue.next,
2192 struct udc_request, queue);
2193 } else
2194 req = NULL;
2195 }
2196
2197 /* DMA */
2198 } else if (!ep->cancel_transfer && req != NULL) {
2199 ret_val = IRQ_HANDLED;
2200
2201 /* check for DMA done */
2202 if (!use_dma_ppb) {
2203 dma_done = AMD_GETBITS(req->td_data->status,
2204 UDC_DMA_OUT_STS_BS);
2205 /* packet per buffer mode - rx bytes */
2206 } else {
2207 /*
2208 * if BNA occurred then recover desc. from
2209 * BNA dummy desc.
2210 */
2211 if (ep->bna_occurred) {
2212 VDBG(dev, "Recover desc. from BNA dummy\n");
2213 memcpy(req->td_data, ep->bna_dummy_req->td_data,
2214 sizeof(struct udc_data_dma));
2215 ep->bna_occurred = 0;
2216 udc_init_bna_dummy(ep->req);
2217 }
2218 td = udc_get_last_dma_desc(req);
2219 dma_done = AMD_GETBITS(td->status, UDC_DMA_OUT_STS_BS);
2220 }
2221 if (dma_done == UDC_DMA_OUT_STS_BS_DMA_DONE) {
2222 /* buffer fill mode - rx bytes */
2223 if (!use_dma_ppb) {
2224 /* received number bytes */
2225 count = AMD_GETBITS(req->td_data->status,
2226 UDC_DMA_OUT_STS_RXBYTES);
2227 VDBG(dev, "rx bytes=%u\n", count);
2228 /* packet per buffer mode - rx bytes */
2229 } else {
2230 VDBG(dev, "req->td_data=%p\n", req->td_data);
2231 VDBG(dev, "last desc = %p\n", td);
2232 /* received number bytes */
2233 if (use_dma_ppb_du) {
2234 /* every desc. counts bytes */
2235 count = udc_get_ppbdu_rxbytes(req);
2236 } else {
2237 /* last desc. counts bytes */
2238 count = AMD_GETBITS(td->status,
2239 UDC_DMA_OUT_STS_RXBYTES);
2240 if (!count && req->req.length
2241 == UDC_DMA_MAXPACKET) {
2242 /*
2243 * on 64k packets the RXBYTES
2244 * field is zero
2245 */
2246 count = UDC_DMA_MAXPACKET;
2247 }
2248 }
2249 VDBG(dev, "last desc rx bytes=%u\n", count);
2250 }
2251
2252 tmp = req->req.length - req->req.actual;
2253 if (count > tmp) {
2254 if ((tmp % ep->ep.maxpacket) != 0) {
2255 DBG(dev, "%s: rx %db, space=%db\n",
2256 ep->ep.name, count, tmp);
2257 req->req.status = -EOVERFLOW;
2258 }
2259 count = tmp;
2260 }
2261 req->req.actual += count;
2262 req->dma_going = 0;
2263 /* complete request */
2264 complete_req(ep, req, 0);
2265
2266 /* next request */
2267 if (!list_empty(&ep->queue) && !ep->halted) {
2268 req = list_entry(ep->queue.next,
2269 struct udc_request,
2270 queue);
2271 /*
2272 * DMA may be already started by udc_queue()
2273 * called by the gadget driver's completion
2274 * routine. This happens when queue
2275 * holds one request only.
2276 */
2277 if (req->dma_going == 0) {
2278 /* next dma */
2279 if (prep_dma(ep, req, GFP_ATOMIC) != 0)
2280 goto finished;
2281 /* write desc pointer */
2282 writel(req->td_phys,
2283 &ep->regs->desptr);
2284 req->dma_going = 1;
2285 /* enable DMA */
2286 udc_set_rde(dev);
2287 }
2288 } else {
2289 /*
2290 * implant BNA dummy descriptor to allow
2291 * RXFIFO opening by RDE
2292 */
2293 if (ep->bna_dummy_req) {
2294 /* write desc pointer */
2295 writel(ep->bna_dummy_req->td_phys,
2296 &ep->regs->desptr);
2297 ep->bna_occurred = 0;
2298 }
2299
2300 /*
2301 * schedule timer for setting RDE if queue
2302 * remains empty, to allow ep0 packets to pass
2303 * through
2304 */
2305 if (set_rde != 0
2306 && !timer_pending(&udc_timer)) {
2307 udc_timer.expires =
2308 jiffies
2309 + HZ*UDC_RDE_TIMER_SECONDS;
2310 set_rde = 1;
2311 if (!stop_timer)
2312 add_timer(&udc_timer);
2313 }
2314 if (ep->num != UDC_EP0OUT_IX)
2315 dev->data_ep_queued = 0;
2316 }
2317
2318 } else {
2319 /*
2320 * RX DMA must be reenabled for each desc in PPBDU mode
2321 * and must be enabled for PPBNDU mode in case of BNA
2322 */
2323 udc_set_rde(dev);
2324 }
2325
2326 } else if (ep->cancel_transfer) {
2327 ret_val = IRQ_HANDLED;
2328 ep->cancel_transfer = 0;
2329 }
2330
2331 /* check pending CNAKS */
2332 if (cnak_pending) {
2333 /* CNAK processing only when rxfifo is empty */
2334 if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
2335 udc_process_cnak_queue(dev);
2336 }
2337
2338 /* clear OUT bits in ep status */
2339 writel(UDC_EPSTS_OUT_CLEAR, &ep->regs->sts);
2340finished:
2341 return ret_val;
2342}
2343
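/*
 * Illustration, not part of the driver: the overflow clamp applied above
 * when the controller reports more RX bytes than the request can take. A
 * short, non-maxpacket-aligned overrun is flagged as -EOVERFLOW and the
 * count is truncated to the remaining space. Sketch, hypothetical names:
 */
static int clamp_rx_count(unsigned int count, unsigned int length,
			  unsigned int actual, unsigned int maxpacket,
			  unsigned int *accepted)
{
	unsigned int space = length - actual;	/* room left in req buffer */
	int status = 0;

	if (count > space) {
		if (space % maxpacket)	/* partial packet would overflow */
			status = -EOVERFLOW;
		count = space;		/* truncate to what fits */
	}
	*accepted = count;
	return status;
}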
2344/* Interrupt handler for data IN traffic */
2345static irqreturn_t udc_data_in_isr(struct udc *dev, int ep_ix)
2346{
2347 irqreturn_t ret_val = IRQ_NONE;
2348 u32 tmp;
2349 u32 epsts;
2350 struct udc_ep *ep;
2351 struct udc_request *req;
2352 struct udc_data_dma *td;
2353 unsigned dma_done;
2354 unsigned len;
2355
2356 ep = &dev->ep[ep_ix];
2357
2358 epsts = readl(&ep->regs->sts);
2359 if (use_dma) {
2360 /* BNA ? */
2361 if (epsts & AMD_BIT(UDC_EPSTS_BNA)) {
2362 dev_err(&dev->pdev->dev,
5647a149 2363 "BNA ep%din occurred - DESPTR = %08lx\n",
55d402d8
TD
2364 ep->num,
2365 (unsigned long) readl(&ep->regs->desptr));
2366
2367 /* clear BNA */
2368 writel(epsts, &ep->regs->sts);
2369 ret_val = IRQ_HANDLED;
2370 goto finished;
2371 }
2372 }
2373 /* HE event ? */
2374 if (epsts & AMD_BIT(UDC_EPSTS_HE)) {
2375 dev_err(&dev->pdev->dev,
5647a149 2376 "HE ep%dn occurred - DESPTR = %08lx\n",
55d402d8
TD
2377 ep->num, (unsigned long) readl(&ep->regs->desptr));
2378
2379 /* clear HE */
2380 writel(epsts | AMD_BIT(UDC_EPSTS_HE), &ep->regs->sts);
2381 ret_val = IRQ_HANDLED;
2382 goto finished;
2383 }
2384
2385 /* DMA completion */
2386 if (epsts & AMD_BIT(UDC_EPSTS_TDC)) {
2387 VDBG(dev, "TDC set- completion\n");
2388 ret_val = IRQ_HANDLED;
2389 if (!ep->cancel_transfer && !list_empty(&ep->queue)) {
2390 req = list_entry(ep->queue.next,
2391 struct udc_request, queue);
2392 /*
2393 * length bytes transferred
2394 * check dma done of last desc. in PPBDU mode
2395 */
2396 if (use_dma_ppb_du) {
2397 td = udc_get_last_dma_desc(req);
2398 if (td) {
2399 dma_done =
2400 AMD_GETBITS(td->status,
2401 UDC_DMA_IN_STS_BS);
2402 /* don't care DMA done */
2403 req->req.actual = req->req.length;
2404 }
2405 } else {
2406 /* assume all bytes transferred */
2407 req->req.actual = req->req.length;
2408 }
2409
2410 if (req->req.actual == req->req.length) {
2411 /* complete req */
2412 complete_req(ep, req, 0);
2413 req->dma_going = 0;
2414 /* further request available ? */
2415 if (list_empty(&ep->queue)) {
2416 /* disable interrupt */
2417 tmp = readl(&dev->regs->ep_irqmsk);
2418 tmp |= AMD_BIT(ep->num);
2419 writel(tmp, &dev->regs->ep_irqmsk);
2420 }
2421 }
2422 }
2423 ep->cancel_transfer = 0;
2424
2425 }
2426 /*
2427 * status reg has IN bit set but TDC not set: if TDC was handled,
2428 * IN must not be handled as well (UDC defect?)
2429 */
2430 if ((epsts & AMD_BIT(UDC_EPSTS_IN))
2431 && !(epsts & AMD_BIT(UDC_EPSTS_TDC))) {
2432 ret_val = IRQ_HANDLED;
2433 if (!list_empty(&ep->queue)) {
2434 /* next request */
2435 req = list_entry(ep->queue.next,
2436 struct udc_request, queue);
2437 /* FIFO mode */
2438 if (!use_dma) {
2439 /* write fifo */
2440 udc_txfifo_write(ep, &req->req);
2441 len = req->req.length - req->req.actual;
2442 if (len > ep->ep.maxpacket)
2443 len = ep->ep.maxpacket;
2444 req->req.actual += len;
2445 if (req->req.actual == req->req.length
2446 || (len != ep->ep.maxpacket)) {
2447 /* complete req */
2448 complete_req(ep, req, 0);
2449 }
2450 /* DMA */
2451 } else if (req && !req->dma_going) {
2452 VDBG(dev, "IN DMA : req=%p req->td_data=%p\n",
2453 req, req->td_data);
2454 if (req->td_data) {
2455
2456 req->dma_going = 1;
2457
2458 /*
2459 * unset L bit of first desc.
2460 * for chain
2461 */
2462 if (use_dma_ppb && req->req.length >
2463 ep->ep.maxpacket) {
2464 req->td_data->status &=
2465 AMD_CLEAR_BIT(
2466 UDC_DMA_IN_STS_L);
2467 }
2468
2469 /* write desc pointer */
2470 writel(req->td_phys, &ep->regs->desptr);
2471
2472 /* set HOST READY */
2473 req->td_data->status =
2474 AMD_ADDBITS(
2475 req->td_data->status,
2476 UDC_DMA_IN_STS_BS_HOST_READY,
2477 UDC_DMA_IN_STS_BS);
2478
2479 /* set poll demand bit */
2480 tmp = readl(&ep->regs->ctl);
2481 tmp |= AMD_BIT(UDC_EPCTL_P);
2482 writel(tmp, &ep->regs->ctl);
2483 }
2484 }
2485
2486 } else if (!use_dma && ep->in) {
2487 /* disable interrupt */
2488 tmp = readl(
2489 &dev->regs->ep_irqmsk);
2490 tmp |= AMD_BIT(ep->num);
2491 writel(tmp,
2492 &dev->regs->ep_irqmsk);
2493 }
2494 }
2495 /* clear status bits */
2496 writel(epsts, &ep->regs->sts);
2497
2498finished:
2499 return ret_val;
2500
2501}
2502
2503/* Interrupt handler for Control OUT traffic */
2504static irqreturn_t udc_control_out_isr(struct udc *dev)
2505__releases(dev->lock)
2506__acquires(dev->lock)
2507{
2508 irqreturn_t ret_val = IRQ_NONE;
2509 u32 tmp;
2510 int setup_supported;
2511 u32 count;
2512 int set = 0;
2513 struct udc_ep *ep;
2514 struct udc_ep *ep_tmp;
2515
2516 ep = &dev->ep[UDC_EP0OUT_IX];
2517
2518 /* clear irq */
2519 writel(AMD_BIT(UDC_EPINT_OUT_EP0), &dev->regs->ep_irqsts);
2520
2521 tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
2522 /* check BNA and clear if set */
2523 if (tmp & AMD_BIT(UDC_EPSTS_BNA)) {
2524 VDBG(dev, "ep0: BNA set\n");
2525 writel(AMD_BIT(UDC_EPSTS_BNA),
2526 &dev->ep[UDC_EP0OUT_IX].regs->sts);
2527 ep->bna_occurred = 1;
2528 ret_val = IRQ_HANDLED;
2529 goto finished;
2530 }
2531
2532 /* type of data: SETUP or DATA 0 bytes */
2533 tmp = AMD_GETBITS(tmp, UDC_EPSTS_OUT);
2534 VDBG(dev, "data_typ = %x\n", tmp);
2535
2536 /* setup data */
2537 if (tmp == UDC_EPSTS_OUT_SETUP) {
2538 ret_val = IRQ_HANDLED;
2539
2540 ep->dev->stall_ep0in = 0;
2541 dev->waiting_zlp_ack_ep0in = 0;
2542
2543 /* set NAK for EP0_IN */
2544 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2545 tmp |= AMD_BIT(UDC_EPCTL_SNAK);
2546 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2547 dev->ep[UDC_EP0IN_IX].naking = 1;
2548 /* get setup data */
2549 if (use_dma) {
2550
2551 /* clear OUT bits in ep status */
2552 writel(UDC_EPSTS_OUT_CLEAR,
2553 &dev->ep[UDC_EP0OUT_IX].regs->sts);
2554
2555 setup_data.data[0] =
2556 dev->ep[UDC_EP0OUT_IX].td_stp->data12;
2557 setup_data.data[1] =
2558 dev->ep[UDC_EP0OUT_IX].td_stp->data34;
2559 /* set HOST READY */
2560 dev->ep[UDC_EP0OUT_IX].td_stp->status =
2561 UDC_DMA_STP_STS_BS_HOST_READY;
2562 } else {
2563 /* read fifo */
2564 udc_rxfifo_read_dwords(dev, setup_data.data, 2);
2565 }
2566
2567 /* determine direction of control data */
2568 if ((setup_data.request.bRequestType & USB_DIR_IN) != 0) {
2569 dev->gadget.ep0 = &dev->ep[UDC_EP0IN_IX].ep;
2570 /* enable RDE */
2571 udc_ep0_set_rde(dev);
2572 set = 0;
2573 } else {
2574 dev->gadget.ep0 = &dev->ep[UDC_EP0OUT_IX].ep;
2575 /*
2576 * implant BNA dummy descriptor to allow RXFIFO opening
2577 * by RDE
2578 */
2579 if (ep->bna_dummy_req) {
2580 /* write desc pointer */
2581 writel(ep->bna_dummy_req->td_phys,
2582 &dev->ep[UDC_EP0OUT_IX].regs->desptr);
2583 ep->bna_occurred = 0;
2584 }
2585
2586 set = 1;
2587 dev->ep[UDC_EP0OUT_IX].naking = 1;
2588 /*
2589 * set up timer for enabling RDE (so that RXFIFO DMA
2590 * is not enabled for data too early)
2591 */
2592 set_rde = 1;
2593 if (!timer_pending(&udc_timer)) {
2594 udc_timer.expires = jiffies +
2595 HZ/UDC_RDE_TIMER_DIV;
2596 if (!stop_timer)
2597 add_timer(&udc_timer);
2598 }
2599 }
2600
2601 /*
2602 * mass storage reset must be processed here because
2603 * next packet may be a CLEAR_FEATURE HALT which would not
2604 * clear the stall bit when no STALL handshake was received
2605 * before (autostall can cause this)
2606 */
2607 if (setup_data.data[0] == UDC_MSCRES_DWORD0
2608 && setup_data.data[1] == UDC_MSCRES_DWORD1) {
2609 DBG(dev, "MSC Reset\n");
2610 /*
2611 * clear stall bits
2612 * only one IN and OUT endpoints are handled
2613 */
2614 ep_tmp = &udc->ep[UDC_EPIN_IX];
2615 udc_set_halt(&ep_tmp->ep, 0);
2616 ep_tmp = &udc->ep[UDC_EPOUT_IX];
2617 udc_set_halt(&ep_tmp->ep, 0);
2618 }
2619
2620 /* call gadget with setup data received */
2621 spin_unlock(&dev->lock);
2622 setup_supported = dev->driver->setup(&dev->gadget,
2623 &setup_data.request);
2624 spin_lock(&dev->lock);
2625
2626 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2627 /* ep0 in returns data (not zlp) on IN phase */
2628 if (setup_supported >= 0 && setup_supported <
2629 UDC_EP0IN_MAXPACKET) {
2630 /* clear NAK by writing CNAK in EP0_IN */
2631 tmp |= AMD_BIT(UDC_EPCTL_CNAK);
2632 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2633 dev->ep[UDC_EP0IN_IX].naking = 0;
2634 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0IN_IX], UDC_EP0IN_IX);
2635
2636 /* if unsupported request then stall */
2637 } else if (setup_supported < 0) {
2638 tmp |= AMD_BIT(UDC_EPCTL_S);
2639 writel(tmp, &dev->ep[UDC_EP0IN_IX].regs->ctl);
2640 } else
2641 dev->waiting_zlp_ack_ep0in = 1;
2642
2643
2644 /* clear NAK by writing CNAK in EP0_OUT */
2645 if (!set) {
2646 tmp = readl(&dev->ep[UDC_EP0OUT_IX].regs->ctl);
2647 tmp |= AMD_BIT(UDC_EPCTL_CNAK);
2648 writel(tmp, &dev->ep[UDC_EP0OUT_IX].regs->ctl);
2649 dev->ep[UDC_EP0OUT_IX].naking = 0;
2650 UDC_QUEUE_CNAK(&dev->ep[UDC_EP0OUT_IX], UDC_EP0OUT_IX);
2651 }
2652
2653 if (!use_dma) {
2654 /* clear OUT bits in ep status */
2655 writel(UDC_EPSTS_OUT_CLEAR,
2656 &dev->ep[UDC_EP0OUT_IX].regs->sts);
2657 }
2658
2659 /* data packet 0 bytes */
2660 } else if (tmp == UDC_EPSTS_OUT_DATA) {
2661 /* clear OUT bits in ep status */
2662 writel(UDC_EPSTS_OUT_CLEAR, &dev->ep[UDC_EP0OUT_IX].regs->sts);
2663
2664 /* get setup data: only 0 packet */
2665 if (use_dma) {
2666 /* no req if 0 packet, just reactivate */
2667 if (list_empty(&dev->ep[UDC_EP0OUT_IX].queue)) {
2668 VDBG(dev, "ZLP\n");
2669
2670 /* set HOST READY */
2671 dev->ep[UDC_EP0OUT_IX].td->status =
2672 AMD_ADDBITS(
2673 dev->ep[UDC_EP0OUT_IX].td->status,
2674 UDC_DMA_OUT_STS_BS_HOST_READY,
2675 UDC_DMA_OUT_STS_BS);
2676 /* enable RDE */
2677 udc_ep0_set_rde(dev);
2678 ret_val = IRQ_HANDLED;
2679
2680 } else {
2681 /* control write */
2682 ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
2683 /* re-program desc. pointer for possible ZLPs */
2684 writel(dev->ep[UDC_EP0OUT_IX].td_phys,
2685 &dev->ep[UDC_EP0OUT_IX].regs->desptr);
2686 /* enable RDE */
2687 udc_ep0_set_rde(dev);
2688 }
2689 } else {
2690
2691 /* received number bytes */
2692 count = readl(&dev->ep[UDC_EP0OUT_IX].regs->sts);
2693 count = AMD_GETBITS(count, UDC_EPSTS_RX_PKT_SIZE);
2694 /* out data for fifo mode not working */
2695 count = 0;
2696
2697 /* 0 packet or real data ? */
2698 if (count != 0) {
2699 ret_val |= udc_data_out_isr(dev, UDC_EP0OUT_IX);
2700 } else {
2701 /* dummy read confirm */
2702 readl(&dev->ep[UDC_EP0OUT_IX].regs->confirm);
2703 ret_val = IRQ_HANDLED;
2704 }
2705 }
2706 }
2707
2708 /* check pending CNAKS */
2709 if (cnak_pending) {
2710 /* CNAK processing only when rxfifo is empty */
2711 if (readl(&dev->regs->sts) & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
2712 udc_process_cnak_queue(dev);
2713 }
2714
2715finished:
2716 return ret_val;
2717}
2718
2719/* Interrupt handler for Control IN traffic */
2720static irqreturn_t udc_control_in_isr(struct udc *dev)
2721{
2722 irqreturn_t ret_val = IRQ_NONE;
2723 u32 tmp;
2724 struct udc_ep *ep;
2725 struct udc_request *req;
2726 unsigned len;
2727
2728 ep = &dev->ep[UDC_EP0IN_IX];
2729
2730 /* clear irq */
2731 writel(AMD_BIT(UDC_EPINT_IN_EP0), &dev->regs->ep_irqsts);
2732
2733 tmp = readl(&dev->ep[UDC_EP0IN_IX].regs->sts);
2734 /* DMA completion */
2735 if (tmp & AMD_BIT(UDC_EPSTS_TDC)) {
2736 VDBG(dev, "isr: TDC clear\n");
2737 ret_val = IRQ_HANDLED;
2738
2739 /* clear TDC bit */
2740 writel(AMD_BIT(UDC_EPSTS_TDC),
2741 &dev->ep[UDC_EP0IN_IX].regs->sts);
2742
2743 /* status reg has IN bit set ? */
2744 } else if (tmp & AMD_BIT(UDC_EPSTS_IN)) {
2745 ret_val = IRQ_HANDLED;
2746
2747 if (ep->dma) {
2748 /* clear IN bit */
2749 writel(AMD_BIT(UDC_EPSTS_IN),
2750 &dev->ep[UDC_EP0IN_IX].regs->sts);
2751 }
2752 if (dev->stall_ep0in) {
2753 DBG(dev, "stall ep0in\n");
2754 /* halt ep0in */
2755 tmp = readl(&ep->regs->ctl);
2756 tmp |= AMD_BIT(UDC_EPCTL_S);
2757 writel(tmp, &ep->regs->ctl);
2758 } else {
2759 if (!list_empty(&ep->queue)) {
2760 /* next request */
2761 req = list_entry(ep->queue.next,
2762 struct udc_request, queue);
2763
2764 if (ep->dma) {
2765 /* write desc pointer */
2766 writel(req->td_phys, &ep->regs->desptr);
2767 /* set HOST READY */
2768 req->td_data->status =
2769 AMD_ADDBITS(
2770 req->td_data->status,
2771 UDC_DMA_STP_STS_BS_HOST_READY,
2772 UDC_DMA_STP_STS_BS);
2773
2774 /* set poll demand bit */
2775 tmp =
2776 readl(&dev->ep[UDC_EP0IN_IX].regs->ctl);
2777 tmp |= AMD_BIT(UDC_EPCTL_P);
2778 writel(tmp,
2779 &dev->ep[UDC_EP0IN_IX].regs->ctl);
2780
2781 /* all bytes will be transferred */
2782 req->req.actual = req->req.length;
2783
2784 /* complete req */
2785 complete_req(ep, req, 0);
2786
2787 } else {
2788 /* write fifo */
2789 udc_txfifo_write(ep, &req->req);
2790
2791 /* length bytes transferred */
2792 len = req->req.length - req->req.actual;
2793 if (len > ep->ep.maxpacket)
2794 len = ep->ep.maxpacket;
2795
2796 req->req.actual += len;
2797 if (req->req.actual == req->req.length
2798 || (len != ep->ep.maxpacket)) {
2799 /* complete req */
2800 complete_req(ep, req, 0);
2801 }
2802 }
2803
2804 }
2805 }
2806 ep->halted = 0;
2807 dev->stall_ep0in = 0;
2808 if (!ep->dma) {
2809 /* clear IN bit */
2810 writel(AMD_BIT(UDC_EPSTS_IN),
2811 &dev->ep[UDC_EP0IN_IX].regs->sts);
2812 }
2813 }
2814
2815 return ret_val;
2816}
2817
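/*
 * Illustration, not part of the driver: both FIFO-mode IN paths above use
 * the same completion rule -- a request is finished once all bytes are
 * written or a short packet (less than maxpacket) went out. Sketch with
 * hypothetical names:
 */
static int in_fifo_account(unsigned int length, unsigned int *actual,
			   unsigned int maxpacket)
{
	unsigned int len = length - *actual;	/* bytes still to send */

	if (len > maxpacket)
		len = maxpacket;	/* at most one packet per IN token */
	*actual += len;

	/* complete on last byte or on a short packet */
	return *actual == length || len != maxpacket;
}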
2818
2819/* Interrupt handler for global device events */
2820static irqreturn_t udc_dev_isr(struct udc *dev, u32 dev_irq)
2821__releases(dev->lock)
2822__acquires(dev->lock)
2823{
2824 irqreturn_t ret_val = IRQ_NONE;
2825 u32 tmp;
2826 u32 cfg;
2827 struct udc_ep *ep;
2828 u16 i;
2829 u8 udc_csr_epix;
2830
2831 /* SET_CONFIG irq ? */
2832 if (dev_irq & AMD_BIT(UDC_DEVINT_SC)) {
2833 ret_val = IRQ_HANDLED;
2834
2835 /* read config value */
2836 tmp = readl(&dev->regs->sts);
2837 cfg = AMD_GETBITS(tmp, UDC_DEVSTS_CFG);
2838 DBG(dev, "SET_CONFIG interrupt: config=%d\n", cfg);
2839 dev->cur_config = cfg;
2840 dev->set_cfg_not_acked = 1;
2841
2842 /* make usb request for gadget driver */
2843 memset(&setup_data, 0 , sizeof(union udc_setup_data));
2844 setup_data.request.bRequest = USB_REQ_SET_CONFIGURATION;
2845 setup_data.request.wValue = cpu_to_le16(dev->cur_config);
2846
2847 /* program the NE registers */
2848 for (i = 0; i < UDC_EP_NUM; i++) {
2849 ep = &dev->ep[i];
2850 if (ep->in) {
2851
2852 /* ep ix in UDC CSR register space */
2853 udc_csr_epix = ep->num;
2854
2855
2856 /* OUT ep */
2857 } else {
2858 /* ep ix in UDC CSR register space */
2859 udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
2860 }
2861
2862 tmp = readl(&dev->csr->ne[udc_csr_epix]);
2863 /* ep cfg */
2864 tmp = AMD_ADDBITS(tmp, ep->dev->cur_config,
2865 UDC_CSR_NE_CFG);
2866 /* write reg */
2867 writel(tmp, &dev->csr->ne[udc_csr_epix]);
2868
2869 /* clear stall bits */
2870 ep->halted = 0;
2871 tmp = readl(&ep->regs->ctl);
2872 tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
2873 writel(tmp, &ep->regs->ctl);
2874 }
2875 /* call gadget zero with setup data received */
2876 spin_unlock(&dev->lock);
2877 tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
2878 spin_lock(&dev->lock);
2879
2880 } /* SET_INTERFACE ? */
2881 if (dev_irq & AMD_BIT(UDC_DEVINT_SI)) {
2882 ret_val = IRQ_HANDLED;
2883
2884 dev->set_cfg_not_acked = 1;
2885 /* read interface and alt setting values */
2886 tmp = readl(&dev->regs->sts);
2887 dev->cur_alt = AMD_GETBITS(tmp, UDC_DEVSTS_ALT);
2888 dev->cur_intf = AMD_GETBITS(tmp, UDC_DEVSTS_INTF);
2889
2890 /* make usb request for gadget driver */
2891 memset(&setup_data, 0 , sizeof(union udc_setup_data));
2892 setup_data.request.bRequest = USB_REQ_SET_INTERFACE;
2893 setup_data.request.bRequestType = USB_RECIP_INTERFACE;
2894 setup_data.request.wValue = cpu_to_le16(dev->cur_alt);
2895 setup_data.request.wIndex = cpu_to_le16(dev->cur_intf);
2896
2897 DBG(dev, "SET_INTERFACE interrupt: alt=%d intf=%d\n",
2898 dev->cur_alt, dev->cur_intf);
2899
2900 /* program the NE registers */
2901 for (i = 0; i < UDC_EP_NUM; i++) {
2902 ep = &dev->ep[i];
2903 if (ep->in) {
2904
2905 /* ep ix in UDC CSR register space */
2906 udc_csr_epix = ep->num;
2907
2908
2909 /* OUT ep */
2910 } else {
2911 /* ep ix in UDC CSR register space */
2912 udc_csr_epix = ep->num - UDC_CSR_EP_OUT_IX_OFS;
2913 }
2914
2915 /* UDC CSR reg */
2916 /* set ep values */
2917 tmp = readl(&dev->csr->ne[udc_csr_epix]);
2918 /* ep interface */
2919 tmp = AMD_ADDBITS(tmp, ep->dev->cur_intf,
2920 UDC_CSR_NE_INTF);
2921 /* tmp = AMD_ADDBITS(tmp, 2, UDC_CSR_NE_INTF); */
2922 /* ep alt */
2923 tmp = AMD_ADDBITS(tmp, ep->dev->cur_alt,
2924 UDC_CSR_NE_ALT);
2925 /* write reg */
2926 writel(tmp, &dev->csr->ne[udc_csr_epix]);
2927
2928 /* clear stall bits */
2929 ep->halted = 0;
2930 tmp = readl(&ep->regs->ctl);
2931 tmp = tmp & AMD_CLEAR_BIT(UDC_EPCTL_S);
2932 writel(tmp, &ep->regs->ctl);
2933 }
2934
2935 /* call gadget zero with setup data received */
2936 spin_unlock(&dev->lock);
2937 tmp = dev->driver->setup(&dev->gadget, &setup_data.request);
2938 spin_lock(&dev->lock);
2939
2940 } /* USB reset */
2941 if (dev_irq & AMD_BIT(UDC_DEVINT_UR)) {
2942 DBG(dev, "USB Reset interrupt\n");
2943 ret_val = IRQ_HANDLED;
2944
2945 /* allow soft reset when suspend occurs */
2946 soft_reset_occured = 0;
2947
2948 dev->waiting_zlp_ack_ep0in = 0;
2949 dev->set_cfg_not_acked = 0;
2950
2951 /* mask not needed interrupts */
2952 udc_mask_unused_interrupts(dev);
2953
2954 /* call gadget to resume and reset configs etc. */
2955 spin_unlock(&dev->lock);
2956 if (dev->sys_suspended && dev->driver->resume) {
2957 dev->driver->resume(&dev->gadget);
2958 dev->sys_suspended = 0;
2959 }
2960 usb_gadget_udc_reset(&dev->gadget, dev->driver);
2961 spin_lock(&dev->lock);
2962
2963 /* disable ep0 to empty req queue */
2964 empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
2965 ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
2966
2967 /* soft reset when rxfifo not empty */
2968 tmp = readl(&dev->regs->sts);
2969 if (!(tmp & AMD_BIT(UDC_DEVSTS_RXFIFO_EMPTY))
2970 && !soft_reset_after_usbreset_occured) {
2971 udc_soft_reset(dev);
2972 soft_reset_after_usbreset_occured++;
2973 }
2974
2975 /*
2976 * DMA reset to kill potential old DMA hw hang,
2977 * POLL bit is already reset by ep_init() through
2978 * disconnect()
2979 */
2980 DBG(dev, "DMA machine reset\n");
2981 tmp = readl(&dev->regs->cfg);
2982 writel(tmp | AMD_BIT(UDC_DEVCFG_DMARST), &dev->regs->cfg);
2983 writel(tmp, &dev->regs->cfg);
2984
2985 /* put into initial config */
2986 udc_basic_init(dev);
2987
2988 /* enable device setup interrupts */
2989 udc_enable_dev_setup_interrupts(dev);
2990
2991 /* enable suspend interrupt */
2992 tmp = readl(&dev->regs->irqmsk);
2993 tmp &= AMD_UNMASK_BIT(UDC_DEVINT_US);
2994 writel(tmp, &dev->regs->irqmsk);
2995
2996 } /* USB suspend */
2997 if (dev_irq & AMD_BIT(UDC_DEVINT_US)) {
2998 DBG(dev, "USB Suspend interrupt\n");
2999 ret_val = IRQ_HANDLED;
3000 if (dev->driver->suspend) {
3001 spin_unlock(&dev->lock);
3002 dev->sys_suspended = 1;
3003 dev->driver->suspend(&dev->gadget);
3004 spin_lock(&dev->lock);
3005 }
3006 } /* new speed ? */
3007 if (dev_irq & AMD_BIT(UDC_DEVINT_ENUM)) {
3008 DBG(dev, "ENUM interrupt\n");
3009 ret_val = IRQ_HANDLED;
3010 soft_reset_after_usbreset_occured = 0;
3011
3012 /* disable ep0 to empty req queue */
3013 empty_req_queue(&dev->ep[UDC_EP0IN_IX]);
3014 ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
3015
3016 /* link up all endpoints */
3017 udc_setup_endpoints(dev);
3018 dev_info(&dev->pdev->dev, "Connect: %s\n",
3019 usb_speed_string(dev->gadget.speed));
3020
3021 /* init ep 0 */
3022 activate_control_endpoints(dev);
3023
3024 /* enable ep0 interrupts */
3025 udc_enable_ep0_interrupts(dev);
3026 }
3027 /* session valid change interrupt */
3028 if (dev_irq & AMD_BIT(UDC_DEVINT_SVC)) {
3029 DBG(dev, "USB SVC interrupt\n");
3030 ret_val = IRQ_HANDLED;
3031
3032 /* check that session is not valid to detect disconnect */
3033 tmp = readl(&dev->regs->sts);
3034 if (!(tmp & AMD_BIT(UDC_DEVSTS_SESSVLD))) {
3035 /* disable suspend interrupt */
3036 tmp = readl(&dev->regs->irqmsk);
3037 tmp |= AMD_BIT(UDC_DEVINT_US);
3038 writel(tmp, &dev->regs->irqmsk);
3039 DBG(dev, "USB Disconnect (session valid low)\n");
3040 /* cleanup on disconnect */
3041 usb_disconnect(udc);
3042 }
3043
3044 }
3045
3046 return ret_val;
3047}
3048
3049/* Interrupt Service Routine, see Linux Kernel Doc for parameters */
3050static irqreturn_t udc_irq(int irq, void *pdev)
3051{
3052 struct udc *dev = pdev;
3053 u32 reg;
3054 u16 i;
3055 u32 ep_irq;
3056 irqreturn_t ret_val = IRQ_NONE;
3057
3058 spin_lock(&dev->lock);
3059
3060 /* check for ep irq */
3061 reg = readl(&dev->regs->ep_irqsts);
3062 if (reg) {
3063 if (reg & AMD_BIT(UDC_EPINT_OUT_EP0))
3064 ret_val |= udc_control_out_isr(dev);
3065 if (reg & AMD_BIT(UDC_EPINT_IN_EP0))
3066 ret_val |= udc_control_in_isr(dev);
3067
3068 /*
3069 * data endpoint
3070 * iterate ep's
3071 */
3072 for (i = 1; i < UDC_EP_NUM; i++) {
3073 ep_irq = 1 << i;
3074 if (!(reg & ep_irq) || i == UDC_EPINT_OUT_EP0)
3075 continue;
3076
3077 /* clear irq status */
3078 writel(ep_irq, &dev->regs->ep_irqsts);
3079
3080 /* irq for out ep ? */
3081 if (i > UDC_EPIN_NUM)
3082 ret_val |= udc_data_out_isr(dev, i);
3083 else
3084 ret_val |= udc_data_in_isr(dev, i);
3085 }
3086
3087 }
3088
3089
3090 /* check for dev irq */
3091 reg = readl(&dev->regs->irqsts);
3092 if (reg) {
3093 /* clear irq */
3094 writel(reg, &dev->regs->irqsts);
3095 ret_val |= udc_dev_isr(dev, reg);
3096 }
3097
3098
3099 spin_unlock(&dev->lock);
3100 return ret_val;
3101}
3102
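/*
 * Illustration, not part of the driver: the IRQ line is requested with
 * IRQF_SHARED (see udc_pci_probe() below), so a handler must return
 * IRQ_NONE when its device shows no interrupt cause, letting the kernel
 * try the other handlers sharing the line. Minimal sketch; struct my_dev
 * and its write-1-to-clear status register are hypothetical:
 */
struct my_dev {
	void __iomem *irqsts;	/* device interrupt status register */
};

static irqreturn_t my_isr(int irq, void *cookie)
{
	struct my_dev *dev = cookie;
	u32 sts = readl(dev->irqsts);

	if (!sts)
		return IRQ_NONE;	/* not our device */

	writel(sts, dev->irqsts);	/* ack what we are about to handle */
	/* ... dispatch on the bits in sts ... */
	return IRQ_HANDLED;
}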
3103/* Tears down device */
3104static void gadget_release(struct device *pdev)
3105{
3106 struct amd5536udc *dev = dev_get_drvdata(pdev);
3107 kfree(dev);
3108}
3109
3110/* Cleanup on device remove */
3111static void udc_remove(struct udc *dev)
3112{
3113 /* remove timer */
3114 stop_timer++;
3115 if (timer_pending(&udc_timer))
3116 wait_for_completion(&on_exit);
3117 if (udc_timer.data)
3118 del_timer_sync(&udc_timer);
3119 /* remove pollstall timer */
3120 stop_pollstall_timer++;
3121 if (timer_pending(&udc_pollstall_timer))
3122 wait_for_completion(&on_pollstall_exit);
3123 if (udc_pollstall_timer.data)
3124 del_timer_sync(&udc_pollstall_timer);
3125 udc = NULL;
3126}
3127
3128/* free all the dma pools */
3129static void free_dma_pools(struct udc *dev)
3130{
3131 dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td,
3132 dev->ep[UDC_EP0OUT_IX].td_phys);
3133 dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td_stp,
3134 dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3135 dma_pool_destroy(dev->stp_requests);
3136 dma_pool_destroy(dev->data_requests);
3137}
3138
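/*
 * Illustration, not part of the driver: the full dma_pool lifecycle that
 * free_dma_pools() above ends. Pools suit many small, equally sized
 * coherent buffers such as this driver's DMA descriptors. Sketch:
 */
static int dma_pool_demo(struct device *dev)
{
	struct dma_pool *pool;
	void *vaddr;
	dma_addr_t handle;

	/* name, owning device, block size, alignment, boundary */
	pool = dma_pool_create("descs", dev, 64, 0, 0);
	if (!pool)
		return -ENOMEM;

	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &handle);
	if (!vaddr) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* program 'handle' into the device, touch the block via 'vaddr' */

	dma_pool_free(pool, vaddr, handle);
	dma_pool_destroy(pool);	/* all blocks must be freed first */
	return 0;
}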
3139/* Reset all pci context */
3140static void udc_pci_remove(struct pci_dev *pdev)
3141{
3142 struct udc *dev;
3143
3144 dev = pci_get_drvdata(pdev);
3145
3146 usb_del_gadget_udc(&udc->gadget);
3147 /* gadget driver must not be registered */
3148 if (WARN_ON(dev->driver))
3149 return;
3150
3151 /* dma pool cleanup */
3152 free_dma_pools(dev);
3153
3154 /* reset controller */
3155 writel(AMD_BIT(UDC_DEVCFG_SOFTRESET), &dev->regs->cfg);
3156 free_irq(pdev->irq, dev);
3157 iounmap(dev->virt_addr);
3158 release_mem_region(pci_resource_start(pdev, 0),
3159 pci_resource_len(pdev, 0));
3160 pci_disable_device(pdev);
3161
3162 udc_remove(dev);
3163}
3164
3165/* create dma pools on init */
3166static int init_dma_pools(struct udc *dev)
3167{
3168 struct udc_stp_dma *td_stp;
3169 struct udc_data_dma *td_data;
3170 int retval;
3171
3172 /* consistent DMA mode setting ? */
3173 if (use_dma_ppb) {
3174 use_dma_bufferfill_mode = 0;
3175 } else {
3176 use_dma_ppb_du = 0;
3177 use_dma_bufferfill_mode = 1;
3178 }
3179
3180 /* DMA setup */
3181 dev->data_requests = dma_pool_create("data_requests", NULL,
3182 sizeof(struct udc_data_dma), 0, 0);
3183 if (!dev->data_requests) {
3184 DBG(dev, "can't get request data pool\n");
3185 return -ENOMEM;
3186 }
3187
3188 /* EP0 in dma regs = dev control regs */
3189 dev->ep[UDC_EP0IN_IX].dma = &dev->regs->ctl;
3190
3191 /* dma desc for setup data */
3192 dev->stp_requests = dma_pool_create("setup requests", NULL,
3193 sizeof(struct udc_stp_dma), 0, 0);
3194 if (!dev->stp_requests) {
3195 DBG(dev, "can't get stp request pool\n");
3196 retval = -ENOMEM;
3197 goto err_create_dma_pool;
3198 }
3199 /* setup */
3200 td_stp = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
3201 &dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3202 if (td_stp == NULL) {
3203 retval = -ENOMEM;
3204 goto err_alloc_dma;
3205 }
3206 dev->ep[UDC_EP0OUT_IX].td_stp = td_stp;
3207
3208 /* data: 0 packets !? */
3209 td_data = dma_pool_alloc(dev->stp_requests, GFP_KERNEL,
3210 &dev->ep[UDC_EP0OUT_IX].td_phys);
3211 if (td_data == NULL) {
3212 retval = -ENOMEM;
3213 goto err_alloc_phys;
3214 }
3215 dev->ep[UDC_EP0OUT_IX].td = td_data;
3216 return 0;
3217
3218err_alloc_phys:
3219 dma_pool_free(dev->stp_requests, dev->ep[UDC_EP0OUT_IX].td_stp,
3220 dev->ep[UDC_EP0OUT_IX].td_stp_dma);
3221err_alloc_dma:
3222 dma_pool_destroy(dev->stp_requests);
3223 dev->stp_requests = NULL;
3224err_create_dma_pool:
3225 dma_pool_destroy(dev->data_requests);
3226 dev->data_requests = NULL;
3227 return retval;
3228}
3229
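/*
 * Illustration, not part of the driver: init_dma_pools() above and
 * udc_pci_probe() below unwind errors with the usual goto ladder --
 * acquire resources in order, and on failure jump to the label that
 * releases exactly what was already acquired. Skeleton with hypothetical
 * get_x()/put_x() helpers:
 */
static int get_a(void) { return 0; }	/* placeholder acquisitions */
static int get_b(void) { return 0; }
static int get_c(void) { return 0; }
static void put_a(void) { }
static void put_b(void) { }

static int acquire_all(void)
{
	int ret;

	ret = get_a();
	if (ret)
		return ret;		/* nothing to undo yet */
	ret = get_b();
	if (ret)
		goto err_a;
	ret = get_c();
	if (ret)
		goto err_b;
	return 0;

err_b:
	put_b();
err_a:
	put_a();
	return ret;
}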
3230/* general probe */
3231static int udc_probe(struct udc *dev)
3232{
3233 char tmp[128];
3234 u32 reg;
3235 int retval;
3236
3237 /* mark timer as not initialized */
3238 udc_timer.data = 0;
3239 udc_pollstall_timer.data = 0;
3240
3241 /* device struct setup */
3242 dev->gadget.ops = &udc_ops;
3243
3244 dev_set_name(&dev->gadget.dev, "gadget");
3245 dev->gadget.name = name;
3246 dev->gadget.max_speed = USB_SPEED_HIGH;
3247
3248 /* init registers, interrupts, ... */
3249 startup_registers(dev);
3250
3251 dev_info(&dev->pdev->dev, "%s\n", mod_desc);
3252
3253 snprintf(tmp, sizeof(tmp), "%d", dev->irq);
3254 dev_info(&dev->pdev->dev,
3255 "irq %s, pci mem %08lx, chip rev %02x(Geode5536 %s)\n",
3256 tmp, dev->phys_addr, dev->chiprev,
3257 (dev->chiprev == UDC_HSA0_REV) ? "A0" : "B1");
3258 strcpy(tmp, UDC_DRIVER_VERSION_STRING);
3259 if (dev->chiprev == UDC_HSA0_REV) {
3260 dev_err(&dev->pdev->dev, "chip revision is A0; too old\n");
3261 retval = -ENODEV;
3262 goto finished;
3263 }
3264 dev_info(&dev->pdev->dev,
3265 "driver version: %s(for Geode5536 B1)\n", tmp);
3266 udc = dev;
3267
3268 retval = usb_add_gadget_udc_release(&udc->pdev->dev, &dev->gadget,
3269 gadget_release);
3270 if (retval)
3271 goto finished;
3272
3273 /* timer init */
3274 init_timer(&udc_timer);
3275 udc_timer.function = udc_timer_function;
3276 udc_timer.data = 1;
3277 /* timer pollstall init */
3278 init_timer(&udc_pollstall_timer);
3279 udc_pollstall_timer.function = udc_pollstall_timer_function;
3280 udc_pollstall_timer.data = 1;
3281
3282 /* set SD */
3283 reg = readl(&dev->regs->ctl);
3284 reg |= AMD_BIT(UDC_DEVCTL_SD);
3285 writel(reg, &dev->regs->ctl);
3286
3287 /* print dev register info */
3288 print_regs(dev);
3289
3290 return 0;
3291
3292finished:
3293 return retval;
3294}
3295
3296/* Called by pci bus driver to init pci context */
3297static int udc_pci_probe(
3298 struct pci_dev *pdev,
3299 const struct pci_device_id *id
3300)
3301{
3302 struct udc *dev;
3303 unsigned long resource;
3304 unsigned long len;
3305 int retval = 0;
3306
3307 /* one udc only */
3308 if (udc) {
3309 dev_dbg(&pdev->dev, "already probed\n");
3310 return -EBUSY;
3311 }
3312
3313 /* init */
3314 dev = kzalloc(sizeof(struct udc), GFP_KERNEL);
3315 if (!dev)
3316 return -ENOMEM;
3317
3318 /* pci setup */
3319 if (pci_enable_device(pdev) < 0) {
3320 retval = -ENODEV;
3321 goto err_pcidev;
3322 }
3323
3324 /* PCI resource allocation */
3325 resource = pci_resource_start(pdev, 0);
3326 len = pci_resource_len(pdev, 0);
3327
3328 if (!request_mem_region(resource, len, name)) {
3329 dev_dbg(&pdev->dev, "pci device used already\n");
3330 retval = -EBUSY;
3331 goto err_memreg;
3332 }
3333
3334 dev->virt_addr = ioremap_nocache(resource, len);
3335 if (dev->virt_addr == NULL) {
3336 dev_dbg(&pdev->dev, "start address cannot be mapped\n");
3337 retval = -EFAULT;
3338 goto err_ioremap;
3339 }
3340
3341 if (!pdev->irq) {
3342 dev_err(&pdev->dev, "irq not set\n");
3343 retval = -ENODEV;
3344 goto err_irq;
3345 }
3346
3347 spin_lock_init(&dev->lock);
3348 /* udc csr registers base */
3349 dev->csr = dev->virt_addr + UDC_CSR_ADDR;
3350 /* dev registers base */
3351 dev->regs = dev->virt_addr + UDC_DEVCFG_ADDR;
3352 /* ep registers base */
3353 dev->ep_regs = dev->virt_addr + UDC_EPREGS_ADDR;
3354 /* fifo's base */
3355 dev->rxfifo = (u32 __iomem *)(dev->virt_addr + UDC_RXFIFO_ADDR);
3356 dev->txfifo = (u32 __iomem *)(dev->virt_addr + UDC_TXFIFO_ADDR);
3357
3358 if (request_irq(pdev->irq, udc_irq, IRQF_SHARED, name, dev) != 0) {
3359 dev_dbg(&pdev->dev, "request_irq(%d) fail\n", pdev->irq);
3360 retval = -EBUSY;
3361 goto err_irq;
3362 }
3363
3364 pci_set_drvdata(pdev, dev);
3365
3366 /* chip revision for Hs AMD5536 */
3367 dev->chiprev = pdev->revision;
3368
3369 pci_set_master(pdev);
3370 pci_try_set_mwi(pdev);
3371
3372 /* init dma pools */
3373 if (use_dma) {
3374 retval = init_dma_pools(dev);
3375 if (retval != 0)
3376 goto err_dma;
3377 }
3378
3379 dev->phys_addr = resource;
3380 dev->irq = pdev->irq;
3381 dev->pdev = pdev;
3382
3383 /* general probing */
3384 if (udc_probe(dev)) {
3385 retval = -ENODEV;
3386 goto err_probe;
3387 }
3388 return 0;
3389
3390err_probe:
3391 if (use_dma)
3392 free_dma_pools(dev);
3393err_dma:
3394 free_irq(pdev->irq, dev);
3395err_irq:
3396 iounmap(dev->virt_addr);
3397err_ioremap:
3398 release_mem_region(resource, len);
3399err_memreg:
3400 pci_disable_device(pdev);
3401err_pcidev:
3402 kfree(dev);
3403 return retval;
3404}
3405
3406/* PCI device parameters */
3407static const struct pci_device_id pci_id[] = {
3408 {
3409 PCI_DEVICE(PCI_VENDOR_ID_AMD, 0x2096),
3410 .class = (PCI_CLASS_SERIAL_USB << 8) | 0xfe,
3411 .class_mask = 0xffffffff,
3412 },
3413 {},
3414};
3415MODULE_DEVICE_TABLE(pci, pci_id);
3416
3417/* PCI functions */
3418static struct pci_driver udc_pci_driver = {
3419 .name = (char *) name,
3420 .id_table = pci_id,
3421 .probe = udc_pci_probe,
3422 .remove = udc_pci_remove,
3423};
3424
3425module_pci_driver(udc_pci_driver);
3426
3427MODULE_DESCRIPTION(UDC_MOD_DESCRIPTION);
3428MODULE_AUTHOR("Thomas Dahlmann");
3429MODULE_LICENSE("GPL");
3430
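/*
 * Illustration, not part of the driver: module_pci_driver() above expands
 * to the module_init()/module_exit() boilerplate, roughly:
 *
 *	static int __init udc_pci_driver_init(void)
 *	{
 *		return pci_register_driver(&udc_pci_driver);
 *	}
 *	module_init(udc_pci_driver_init);
 *
 *	static void __exit udc_pci_driver_exit(void)
 *	{
 *		pci_unregister_driver(&udc_pci_driver);
 *	}
 *	module_exit(udc_pci_driver_exit);
 */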