usb: gadget: net2280: Refactor queues_show
[linux-2.6-block.git] drivers/usb/gadget/net2280.c
1/*
2 * Driver for the PLX NET2280 USB device controller.
3 * Specs and errata are available from <http://www.plxtech.com>.
4 *
901b3d75 5 * PLX Technology Inc. (formerly NetChip Technology) supported the
1da177e4
LT
6 * development of this driver.
7 *
8 *
9 * CODE STATUS HIGHLIGHTS
10 *
11 * This driver should work well with most "gadget" drivers, including
fa06920a 12 * the Mass Storage, Serial, and Ethernet/RNDIS gadget drivers
1da177e4
LT
13 * as well as Gadget Zero and Gadgetfs.
14 *
15 * DMA is enabled by default. Drivers using transfer queues might use
16 * DMA chaining to remove IRQ latencies between transfers. (Except when
17 * short OUT transfers happen.) Drivers can use the req->no_interrupt
18 * hint to completely eliminate some IRQs, if a later IRQ is guaranteed
19 * and DMA chaining is enabled.
20 *
adc82f77
RRD
21 * MSI is enabled by default. The legacy IRQ is used if MSI couldn't
22 * be enabled.
23 *
1da177e4
LT
24 * Note that almost all the errata workarounds here are only needed for
25 * rev1 chips. Rev1a silicon (0110) fixes almost all of them.
26 */
27
28/*
29 * Copyright (C) 2003 David Brownell
30 * Copyright (C) 2003-2005 PLX Technology, Inc.
adc82f77 31 * Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS
1da177e4 32 *
901b3d75
DB
33 * Modified Seth Levy 2005 PLX Technology, Inc. to provide compatibility
34 * with 2282 chip
950ee4c8 35 *
adc82f77
RRD
36 * Modified Ricardo Ribalda Qtechnology AS to provide compatibility
37 * with usb 338x chip. Based on PLX driver
38 *
1da177e4
LT
39 * This program is free software; you can redistribute it and/or modify
40 * it under the terms of the GNU General Public License as published by
41 * the Free Software Foundation; either version 2 of the License, or
42 * (at your option) any later version.
1da177e4
LT
43 */
44
45#undef DEBUG /* messages on error and most fault paths */
46#undef VERBOSE /* extra debug messages (success too) */
47
1da177e4
LT
48#include <linux/module.h>
49#include <linux/pci.h>
682d4c80 50#include <linux/dma-mapping.h>
1da177e4
LT
51#include <linux/kernel.h>
52#include <linux/delay.h>
53#include <linux/ioport.h>
1da177e4 54#include <linux/slab.h>
1da177e4
LT
55#include <linux/errno.h>
56#include <linux/init.h>
57#include <linux/timer.h>
58#include <linux/list.h>
59#include <linux/interrupt.h>
60#include <linux/moduleparam.h>
61#include <linux/device.h>
5f848137 62#include <linux/usb/ch9.h>
9454a57a 63#include <linux/usb/gadget.h>
b38b03b3 64#include <linux/prefetch.h>
1da177e4
LT
65
66#include <asm/byteorder.h>
67#include <asm/io.h>
68#include <asm/irq.h>
1da177e4
LT
69#include <asm/unaligned.h>
70
adc82f77
RRD
#define DRIVER_DESC		"PLX NET228x/USB338x USB Peripheral Controller"
#define DRIVER_VERSION		"2005 Sept 27/v3.0"

#define EP_DONTUSE		13	/* nonzero */

#define USE_RDK_LEDS		/* GPIO pins control three LEDs */


static const char driver_name [] = "net2280";
static const char driver_desc [] = DRIVER_DESC;

/* per-endpoint interrupt bit position in pciirqenb0 on USB338x parts;
 * legacy 228x parts simply use bit number == ep->num (see
 * enable_pciirqenb())
 */
static const u32 ep_bit[9] = { 0, 17, 2, 19, 4, 1, 18, 3, 20 };
static const char ep0name [] = "ep0";
static const char *const ep_name [] = {
	ep0name,
	"ep-a", "ep-b", "ep-c", "ep-d",
	"ep-e", "ep-f", "ep-g", "ep-h",
};
89
/* use_dma -- general goodness, fewer interrupts, less cpu load (vs PIO)
 * use_dma_chaining -- dma descriptor queueing gives even more irq reduction
 *
 * The net2280 DMA engines are not tightly integrated with their FIFOs;
 * not all cases are (yet) handled well in this driver or the silicon.
 * Some gadget drivers work better with the dma support here than others.
 * These two parameters let you use PIO or more aggressive DMA.
 */
static bool use_dma = true;
static bool use_dma_chaining;
static bool use_msi = true;	/* legacy IRQ is used if MSI can't be enabled */

/* "modprobe net2280 use_dma=n" etc */
module_param (use_dma, bool, S_IRUGO);
module_param (use_dma_chaining, bool, S_IRUGO);
module_param(use_msi, bool, S_IRUGO);

/* mode 0 == ep-{a,b,c,d} 1K fifo each
 * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
 * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
 */
static ushort fifo_mode = 0;

/* "modprobe net2280 fifo_mode=1" etc */
module_param (fifo_mode, ushort, 0644);

/* enable_suspend -- When enabled, the driver will respond to
 * USB suspend requests by powering down the NET2280.  Otherwise,
 * USB suspend requests will be ignored.  This is acceptable for
 * self-powered devices
 */
static bool enable_suspend;

/* "modprobe net2280 enable_suspend=1" etc */
module_param (enable_suspend, bool, S_IRUGO);

/* force full-speed operation */
static bool full_speed;
module_param(full_speed, bool, 0444);
MODULE_PARM_DESC(full_speed, "force full-speed mode -- for testing only!");
1da177e4
LT
130
/* "in"/"out" label for an endpoint address, used in debug messages */
#define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")
132
#if defined(CONFIG_USB_GADGET_DEBUG_FILES) || defined (DEBUG)
/* human-readable transfer type of an endpoint, for debug output only */
static char *type_string (u8 bmAttributes)
{
	u8 xfer = bmAttributes & USB_ENDPOINT_XFERTYPE_MASK;

	if (xfer == USB_ENDPOINT_XFER_BULK)
		return "bulk";
	if (xfer == USB_ENDPOINT_XFER_ISOC)
		return "iso";
	if (xfer == USB_ENDPOINT_XFER_INT)
		return "intr";
	return "control";
}
#endif
144
145#include "net2280.h"
146
3e76fdcb
RRD
147#define valid_bit cpu_to_le32(BIT(VALID_BIT))
148#define dma_done_ie cpu_to_le32(BIT(DMA_DONE_INTERRUPT_ENABLE))
1da177e4
LT
149
150/*-------------------------------------------------------------------------*/
adc82f77
RRD
151static inline void enable_pciirqenb(struct net2280_ep *ep)
152{
153 u32 tmp = readl(&ep->dev->regs->pciirqenb0);
154
c2db8a8a 155 if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
3e76fdcb 156 tmp |= BIT(ep->num);
adc82f77 157 else
3e76fdcb 158 tmp |= BIT(ep_bit[ep->num]);
adc82f77
RRD
159 writel(tmp, &ep->dev->regs->pciirqenb0);
160
161 return;
162}
1da177e4
LT
163
/*
 * Configure and enable a non-control endpoint according to its USB
 * descriptor: validates the descriptor against chip/speed limits,
 * programs type/direction/maxpacket into ep_cfg, and unmasks the
 * relevant PIO or DMA interrupts.  Returns 0 or a negative errno.
 */
static int
net2280_enable (struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
	struct net2280		*dev;
	struct net2280_ep	*ep;
	u32			max, tmp;
	unsigned long		flags;
	/* per-endpoint flag table for 338x enhanced mode; presumably marks
	 * endpoints unusable as IN in that mode -- confirm against 338x docs
	 */
	static const u32 ep_key[9] = { 1, 0, 1, 0, 1, 1, 0, 1, 0 };

	ep = container_of (_ep, struct net2280_ep, ep);
	if (!_ep || !desc || ep->desc || _ep->name == ep0name
			|| desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* erratum 0119 workaround ties up an endpoint number */
	if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE)
		return -EDOM;

	/* extra checks that only apply to the 338x family */
	if (dev->pdev->vendor == PCI_VENDOR_ID_PLX) {
		if ((desc->bEndpointAddress & 0x0f) >= 0x0c)
			return -EDOM;
		ep->is_in = !!usb_endpoint_dir_in(desc);
		if (dev->enhanced_mode && ep->is_in && ep_key[ep->num])
			return -EINVAL;
	}

	/* sanity check ep-e/ep-f since their fifos are small */
	max = usb_endpoint_maxp (desc) & 0x1fff;
	if (ep->num > 4 && max > 64 &&
	    (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY))
		return -ERANGE;

	spin_lock_irqsave (&dev->lock, flags);
	_ep->maxpacket = max & 0x7ff;
	ep->desc = desc;

	/* ep_reset() has already been called */
	ep->stopped = 0;
	ep->wedged = 0;
	ep->out_overflow = 0;

	/* set speed-dependent max packet; may kick in high bandwidth */
	set_max_speed(ep, max);

	/* FIFO lines can't go to different packets.  PIO is ok, so
	 * use it instead of troublesome (non-bulk) multi-packet DMA.
	 */
	if (ep->dma && (max % 4) != 0 && use_dma_chaining) {
		DEBUG (ep->dev, "%s, no dma for maxpacket %d\n",
			ep->ep.name, ep->ep.maxpacket);
		ep->dma = NULL;
	}

	/* set type, direction, address; reset fifo counters */
	writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
	tmp = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
	if (tmp == USB_ENDPOINT_XFER_INT) {
		/* erratum 0105 workaround prevents hs NYET */
		if (dev->chiprev == 0100
				&& dev->gadget.speed == USB_SPEED_HIGH
				&& !(desc->bEndpointAddress & USB_DIR_IN))
			writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE),
				&ep->regs->ep_rsp);
	} else if (tmp == USB_ENDPOINT_XFER_BULK) {
		/* catch some particularly blatant driver bugs */
		if ((dev->gadget.speed == USB_SPEED_SUPER && max != 1024) ||
		    (dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
		    (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
			spin_unlock_irqrestore(&dev->lock, flags);
			return -ERANGE;
		}
	}
	ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC) ? 1 : 0;

	/* Enable this endpoint: ep_cfg register layout differs between
	 * the legacy 228x parts and the 338x parts.
	 */
	if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY) {
		tmp <<= ENDPOINT_TYPE;
		tmp |= desc->bEndpointAddress;
		/* default full fifo lines */
		tmp |= (4 << ENDPOINT_BYTE_COUNT);
		tmp |= BIT(ENDPOINT_ENABLE);
		ep->is_in = (tmp & USB_DIR_IN) != 0;
	} else {
		/* In Legacy mode, only OUT endpoints are used */
		if (dev->enhanced_mode && ep->is_in) {
			tmp <<= IN_ENDPOINT_TYPE;
			tmp |= BIT(IN_ENDPOINT_ENABLE);
			/* Not applicable to Legacy */
			tmp |= BIT(ENDPOINT_DIRECTION);
		} else {
			tmp <<= OUT_ENDPOINT_TYPE;
			tmp |= BIT(OUT_ENDPOINT_ENABLE);
			tmp |= (ep->is_in << ENDPOINT_DIRECTION);
		}

		tmp |= usb_endpoint_num(desc);
		tmp |= (ep->ep.maxburst << MAX_BURST_SIZE);
	}

	/* Make sure all the registers are written before ep_rsp*/
	wmb();

	/* for OUT transfers, block the rx fifo until a read is posted */
	if (!ep->is_in)
		writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
	else if (dev->pdev->device != 0x2280) {
		/* Added for 2282, Don't use nak packets on an in endpoint,
		 * this was ignored on 2280
		 */
		writel(BIT(CLEAR_NAK_OUT_PACKETS) |
			BIT(CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp);
	}

	writel(tmp, &ep->cfg->ep_cfg);

	/* enable irqs */
	if (!ep->dma) {				/* pio, per-packet */
		enable_pciirqenb(ep);

		tmp = BIT(DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) |
			BIT(DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE);
		if (dev->pdev->device == 0x2280)
			tmp |= readl (&ep->regs->ep_irqenb);
		writel (tmp, &ep->regs->ep_irqenb);
	} else {				/* dma, per-request */
		tmp = BIT((8 + ep->num));	/* completion */
		tmp |= readl (&dev->regs->pciirqenb1);
		writel (tmp, &dev->regs->pciirqenb1);

		/* for short OUT transfers, dma completions can't
		 * advance the queue; do it pio-style, by hand.
		 * NOTE erratum 0112 workaround #2
		 */
		if ((desc->bEndpointAddress & USB_DIR_IN) == 0) {
			tmp = BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE);
			writel (tmp, &ep->regs->ep_irqenb);

			enable_pciirqenb(ep);
		}
	}

	tmp = desc->bEndpointAddress;
	DEBUG (dev, "enabled %s (ep%d%s-%s) %s max %04x\n",
		_ep->name, tmp & 0x0f, DIR_STRING (tmp),
		type_string (desc->bmAttributes),
		ep->dma ? "dma" : "pio", max);

	/* pci writes may still be posted */
	spin_unlock_irqrestore (&dev->lock, flags);
	return 0;
}
317
318static int handshake (u32 __iomem *ptr, u32 mask, u32 done, int usec)
319{
320 u32 result;
321
322 do {
323 result = readl (ptr);
324 if (result == ~(u32)0) /* "device unplugged" */
325 return -ENODEV;
326 result &= mask;
327 if (result == done)
328 return 0;
329 udelay (1);
330 usec--;
331 } while (usec > 0);
332 return -ETIMEDOUT;
333}
334
901b3d75 335static const struct usb_ep_ops net2280_ep_ops;
1da177e4 336
adc82f77
RRD
/*
 * Return a 228x endpoint to its default state: DMA channel stopped,
 * per-endpoint and PCI interrupt enables cleared, NAK/halt/toggle
 * response bits reset, and latched status scrubbed.  The caller has
 * already emptied the request queue.
 */
static void ep_reset_228x(struct net2280_regs __iomem *regs,
					struct net2280_ep *ep)
{
	u32		tmp;

	ep->desc = NULL;
	INIT_LIST_HEAD (&ep->queue);

	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
	ep->ep.ops = &net2280_ep_ops;

	/* disable the dma, irqs, endpoint... */
	if (ep->dma) {
		writel (0, &ep->dma->dmactl);
		/* writing these bits acks/clears the latched DMA status */
		writel(BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
			BIT(DMA_TRANSACTION_DONE_INTERRUPT) |
			BIT(DMA_ABORT),
			&ep->dma->dmastat);

		tmp = readl (&regs->pciirqenb0);
		tmp &= ~BIT(ep->num);
		writel (tmp, &regs->pciirqenb0);
	} else {
		tmp = readl (&regs->pciirqenb1);
		tmp &= ~BIT((8 + ep->num));	/* completion */
		writel (tmp, &regs->pciirqenb1);
	}
	writel (0, &ep->regs->ep_irqenb);

	/* init to our chosen defaults, notably so that we NAK OUT
	 * packets until the driver queues a read (+note erratum 0112)
	 */
	if (!ep->is_in || ep->dev->pdev->device == 0x2280) {
		tmp = BIT(SET_NAK_OUT_PACKETS_MODE) |
		BIT(SET_NAK_OUT_PACKETS) |
		BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
		BIT(CLEAR_INTERRUPT_MODE);
	} else {
		/* added for 2282 */
		tmp = BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
		BIT(CLEAR_NAK_OUT_PACKETS) |
		BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
		BIT(CLEAR_INTERRUPT_MODE);
	}

	if (ep->num != 0) {
		tmp |= BIT(CLEAR_ENDPOINT_TOGGLE) |
			BIT(CLEAR_ENDPOINT_HALT);
	}
	writel (tmp, &ep->regs->ep_rsp);

	/* scrub most status bits, and flush any fifo state */
	if (ep->dev->pdev->device == 0x2280)
		tmp = BIT(FIFO_OVERFLOW) |
			BIT(FIFO_UNDERFLOW);
	else
		tmp = 0;

	writel(tmp | BIT(TIMEOUT) |
		BIT(USB_STALL_SENT) |
		BIT(USB_IN_NAK_SENT) |
		BIT(USB_IN_ACK_RCVD) |
		BIT(USB_OUT_PING_NAK_SENT) |
		BIT(USB_OUT_ACK_SENT) |
		BIT(FIFO_FLUSH) |
		BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
		BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
		BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
		BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
		BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
		BIT(DATA_IN_TOKEN_INTERRUPT)
		, &ep->regs->ep_stat);

	/* fifo size is handled separately */
}
412
adc82f77
RRD
/*
 * 338x flavour of endpoint reset: like ep_reset_228x() but using the
 * 338x DMA status bits and the scattered pciirqenb0 bit mapping
 * (ep_bit[]); completion interrupts in pciirqenb1 only exist for
 * endpoints 0..4 here.
 */
static void ep_reset_338x(struct net2280_regs __iomem *regs,
					struct net2280_ep *ep)
{
	u32 tmp, dmastat;

	ep->desc = NULL;
	INIT_LIST_HEAD(&ep->queue);

	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
	ep->ep.ops = &net2280_ep_ops;

	/* disable the dma, irqs, endpoint... */
	if (ep->dma) {
		writel(0, &ep->dma->dmactl);
		writel(BIT(DMA_ABORT_DONE_INTERRUPT) |
		       BIT(DMA_PAUSE_DONE_INTERRUPT) |
		       BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
		       BIT(DMA_TRANSACTION_DONE_INTERRUPT)
		       /* | BIT(DMA_ABORT) */
		       , &ep->dma->dmastat);

		dmastat = readl(&ep->dma->dmastat);
		if (dmastat == 0x5002) {
			/* NOTE(review): magic values 0x5002/0x5a are not
			 * explained here -- presumably a silicon quirk
			 * workaround; confirm against the 338x errata.
			 */
			WARNING(ep->dev, "The dmastat return = %x!!\n",
				dmastat);
			writel(0x5a, &ep->dma->dmastat);
		}

		tmp = readl(&regs->pciirqenb0);
		tmp &= ~BIT(ep_bit[ep->num]);
		writel(tmp, &regs->pciirqenb0);
	} else {
		if (ep->num < 5) {
			tmp = readl(&regs->pciirqenb1);
			tmp &= ~BIT((8 + ep->num));	/* completion */
			writel(tmp, &regs->pciirqenb1);
		}
	}
	writel(0, &ep->regs->ep_irqenb);

	/* scrub latched endpoint status */
	writel(BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
	       BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
	       BIT(FIFO_OVERFLOW) |
	       BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
	       BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
	       BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
	       BIT(DATA_IN_TOKEN_INTERRUPT), &ep->regs->ep_stat);
}
461
1da177e4
LT
462static void nuke (struct net2280_ep *);
463
464static int net2280_disable (struct usb_ep *_ep)
465{
466 struct net2280_ep *ep;
467 unsigned long flags;
468
469 ep = container_of (_ep, struct net2280_ep, ep);
470 if (!_ep || !ep->desc || _ep->name == ep0name)
471 return -EINVAL;
472
473 spin_lock_irqsave (&ep->dev->lock, flags);
474 nuke (ep);
adc82f77 475
c2db8a8a 476 if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX)
adc82f77
RRD
477 ep_reset_338x(ep->dev->regs, ep);
478 else
479 ep_reset_228x(ep->dev->regs, ep);
1da177e4
LT
480
481 VDEBUG (ep->dev, "disabled %s %s\n",
482 ep->dma ? "dma" : "pio", _ep->name);
483
484 /* synch memory views with the device */
adc82f77 485 (void)readl(&ep->cfg->ep_cfg);
1da177e4
LT
486
487 if (use_dma && !ep->dma && ep->num >= 1 && ep->num <= 4)
488 ep->dma = &ep->dev->dma [ep->num - 1];
489
490 spin_unlock_irqrestore (&ep->dev->lock, flags);
491 return 0;
492}
493
494/*-------------------------------------------------------------------------*/
495
496static struct usb_request *
55016f10 497net2280_alloc_request (struct usb_ep *_ep, gfp_t gfp_flags)
1da177e4
LT
498{
499 struct net2280_ep *ep;
500 struct net2280_request *req;
501
502 if (!_ep)
503 return NULL;
504 ep = container_of (_ep, struct net2280_ep, ep);
505
7039f422 506 req = kzalloc(sizeof(*req), gfp_flags);
1da177e4
LT
507 if (!req)
508 return NULL;
509
1da177e4
LT
510 INIT_LIST_HEAD (&req->queue);
511
512 /* this dma descriptor may be swapped with the previous dummy */
513 if (ep->dma) {
514 struct net2280_dma *td;
515
516 td = pci_pool_alloc (ep->dev->requests, gfp_flags,
517 &req->td_dma);
518 if (!td) {
519 kfree (req);
520 return NULL;
521 }
522 td->dmacount = 0; /* not VALID */
1da177e4
LT
523 td->dmadesc = td->dmaaddr;
524 req->td = td;
525 }
526 return &req->req;
527}
528
529static void
530net2280_free_request (struct usb_ep *_ep, struct usb_request *_req)
531{
532 struct net2280_ep *ep;
533 struct net2280_request *req;
534
535 ep = container_of (_ep, struct net2280_ep, ep);
536 if (!_ep || !_req)
537 return;
538
539 req = container_of (_req, struct net2280_request, req);
540 WARN_ON (!list_empty (&req->queue));
541 if (req->td)
542 pci_pool_free (ep->dev->requests, req->td, req->td_dma);
543 kfree (req);
544}
545
546/*-------------------------------------------------------------------------*/
547
1da177e4
LT
/* load a packet into the fifo we use for usb IN transfers.
 * works for all endpoints.
 *
 * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo
 * at a time, but this code is simpler because it knows it only writes
 * one packet.  ep-a..ep-d should use dma instead.
 */
static void
write_fifo (struct net2280_ep *ep, struct usb_request *req)
{
	struct net2280_ep_regs	__iomem *regs = ep->regs;
	u8			*buf;
	u32			tmp;
	unsigned		count, total;

	/* INVARIANT: fifo is currently empty. (testable) */

	if (req) {
		buf = req->buf + req->actual;
		prefetch (buf);
		total = req->length - req->actual;
	} else {
		/* req == NULL writes a zero-length packet */
		total = 0;
		buf = NULL;
	}

	/* write just one packet at a time */
	count = ep->ep.maxpacket;
	if (count > total)	/* min() cannot be used on a bitfield */
		count = total;

	VDEBUG (ep->dev, "write %s fifo (IN) %d bytes%s req %p\n",
			ep->ep.name, count,
			(count != ep->ep.maxpacket) ? " (short)" : "",
			req);
	/* push whole 32-bit fifo lines, byte order fixed to LE */
	while (count >= 4) {
		/* NOTE be careful if you try to align these. fifo lines
		 * should normally be full (4 bytes) and successive partial
		 * lines are ok only in certain cases.
		 */
		tmp = get_unaligned ((u32 *)buf);
		cpu_to_le32s (&tmp);
		writel (tmp, &regs->ep_data);
		buf += 4;
		count -= 4;
	}

	/* last fifo entry is "short" unless we wrote a full packet.
	 * also explicitly validate last word in (periodic) transfers
	 * when maxpacket is not a multiple of 4 bytes.
	 */
	if (count || total < ep->ep.maxpacket) {
		tmp = count ? get_unaligned ((u32 *)buf) : count;
		cpu_to_le32s (&tmp);
		set_fifo_bytecount (ep, count & 0x03);
		writel (tmp, &regs->ep_data);
	}

	/* pci writes may still be posted */
}
608
/* work around erratum 0106: PCI and USB race over the OUT fifo.
 * caller guarantees chiprev 0100, out endpoint is NAKing, and
 * there's no real data in the fifo.
 *
 * NOTE: also used in cases where that erratum doesn't apply:
 * where the host wrote "too much" data to us.
 */
static void out_flush (struct net2280_ep *ep)
{
	u32	__iomem *statp;
	u32	tmp;

	ASSERT_OUT_NAKING (ep);

	statp = &ep->regs->ep_stat;
	/* ack latched receive status, then flush the fifo */
	writel(BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
		BIT(DATA_PACKET_RECEIVED_INTERRUPT)
		, statp);
	writel(BIT(FIFO_FLUSH), statp);
	mb ();
	tmp = readl (statp);
	if (tmp & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)
			/* high speed did bulk NYET; fifo isn't filling */
			&& ep->dev->gadget.speed == USB_SPEED_FULL) {
		unsigned	usec;

		usec = 50;		/* 64 byte bulk/interrupt */
		handshake(statp, BIT(USB_OUT_PING_NAK_SENT),
				BIT(USB_OUT_PING_NAK_SENT), usec);
		/* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */
	}
}
641
/* unload packet(s) from the fifo we use for usb OUT transfers.
 * returns true iff the request completed, because of short packet
 * or the request buffer having filled with full packets.
 *
 * for ep-a..ep-d this will read multiple packets out when they
 * have been accepted.
 */
static int
read_fifo (struct net2280_ep *ep, struct net2280_request *req)
{
	struct net2280_ep_regs	__iomem *regs = ep->regs;
	u8			*buf = req->req.buf + req->req.actual;
	unsigned		count, tmp, is_short;
	unsigned		cleanup = 0, prevent = 0;

	/* erratum 0106 ... packets coming in during fifo reads might
	 * be incompletely rejected. not all cases have workarounds.
	 */
	if (ep->dev->chiprev == 0x0100
			&& ep->dev->gadget.speed == USB_SPEED_FULL) {
		udelay (1);
		tmp = readl (&ep->regs->ep_stat);
		if ((tmp & BIT(NAK_OUT_PACKETS)))
			cleanup = 1;
		else if ((tmp & BIT(FIFO_FULL))) {
			start_out_naking (ep);
			prevent = 1;
		}
		/* else: hope we don't see the problem */
	}

	/* never overflow the rx buffer. the fifo reads packets until
	 * it sees a short one; we might not be ready for them all.
	 */
	prefetchw (buf);
	count = readl (&regs->ep_avail);
	if (unlikely (count == 0)) {
		/* re-check once: avail may lag the status update */
		udelay (1);
		tmp = readl (&ep->regs->ep_stat);
		count = readl (&regs->ep_avail);
		/* handled that data already? */
		if (count == 0 && (tmp & BIT(NAK_OUT_PACKETS)) == 0)
			return 0;
	}

	tmp = req->req.length - req->req.actual;
	if (count > tmp) {
		/* as with DMA, data overflow gets flushed */
		if ((tmp % ep->ep.maxpacket) != 0) {
			ERROR (ep->dev,
				"%s out fifo %d bytes, expected %d\n",
				ep->ep.name, count, tmp);
			req->req.status = -EOVERFLOW;
			cleanup = 1;
			/* NAK_OUT_PACKETS will be set, so flushing is safe;
			 * the next read will start with the next packet
			 */
		} /* else it's a ZLP, no worries */
		count = tmp;
	}
	req->req.actual += count;

	is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0);

	VDEBUG (ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n",
			ep->ep.name, count, is_short ? " (short)" : "",
			cleanup ? " flush" : "", prevent ? " nak" : "",
			req, req->req.actual, req->req.length);

	/* drain whole 32-bit words first, then any trailing bytes */
	while (count >= 4) {
		tmp = readl (&regs->ep_data);
		cpu_to_le32s (&tmp);
		put_unaligned (tmp, (u32 *)buf);
		buf += 4;
		count -= 4;
	}
	if (count) {
		tmp = readl (&regs->ep_data);
		/* LE conversion is implicit here: */
		do {
			*buf++ = (u8) tmp;
			tmp >>= 8;
		} while (--count);
	}
	if (cleanup)
		out_flush (ep);
	if (prevent) {
		writel(BIT(CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
		(void) readl (&ep->regs->ep_rsp);
	}

	return is_short || ((req->req.actual == req->req.length)
			&& !req->req.zero);
}
736
/* fill out dma descriptor to match a given request; "valid" selects
 * whether the controller may consume the descriptor immediately
 */
static void
fill_dma_desc (struct net2280_ep *ep, struct net2280_request *req, int valid)
{
	struct net2280_dma	*td = req->td;
	u32			dmacount = req->req.length;

	/* don't let DMA continue after a short OUT packet,
	 * so overruns can't affect the next transfer.
	 * in case of overruns on max-size packets, we can't
	 * stop the fifo from filling but we can flush it.
	 */
	if (ep->is_in)
		dmacount |= BIT(DMA_DIRECTION);
	if ((!ep->is_in && (dmacount % ep->ep.maxpacket) != 0)
			|| ep->dev->pdev->device != 0x2280)
		dmacount |= BIT(END_OF_CHAIN);

	req->valid = valid;
	if (valid)
		dmacount |= BIT(VALID_BIT);
	/* suppress the per-descriptor irq only when chaining allows it */
	if (likely(!req->req.no_interrupt || !use_dma_chaining))
		dmacount |= BIT(DMA_DONE_INTERRUPT_ENABLE);

	/* td->dmadesc = previously set by caller */
	td->dmaaddr = cpu_to_le32 (req->req.dma);

	/* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
	wmb ();
	td->dmacount = cpu_to_le32(dmacount);
}
768
/* baseline dmactl settings used by start_dma()/start_queue() */
static const u32 dmactl_default =
		BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
		BIT(DMA_CLEAR_COUNT_ENABLE) |
		/* erratum 0116 workaround part 1 (use POLLING) */
		(POLL_100_USEC << DESCRIPTOR_POLLING_RATE) |
		BIT(DMA_VALID_BIT_POLLING_ENABLE) |
		BIT(DMA_VALID_BIT_ENABLE) |
		BIT(DMA_SCATTER_GATHER_ENABLE) |
		/* erratum 0116 workaround part 2 (no AUTOSTART) */
		BIT(DMA_ENABLE);
1da177e4
LT
779
/* wait (up to ~50us) for the channel's DMA_ENABLE bit to read back clear */
static inline void spin_stop_dma (struct net2280_dma_regs __iomem *dma)
{
	handshake(&dma->dmactl, BIT(DMA_ENABLE), 0, 50);
}
784
/* clear DMA_ENABLE, then wait until the channel has actually stopped */
static inline void stop_dma (struct net2280_dma_regs __iomem *dma)
{
	writel(readl(&dma->dmactl) & ~BIT(DMA_ENABLE), &dma->dmactl);
	spin_stop_dma (dma);
}
790
/*
 * Point an idle channel at a descriptor chain (td_dma) and start DMA;
 * for OUT endpoints also stop NAKing so the host may fill the fifo.
 */
static void start_queue (struct net2280_ep *ep, u32 dmactl, u32 td_dma)
{
	struct net2280_dma_regs	__iomem *dma = ep->dma;
	unsigned int tmp = BIT(VALID_BIT) | (ep->is_in << DMA_DIRECTION);

	if (ep->dev->pdev->device != 0x2280)
		tmp |= BIT(END_OF_CHAIN);

	writel (tmp, &dma->dmacount);
	writel (readl (&dma->dmastat), &dma->dmastat);	/* ack stale status */

	writel (td_dma, &dma->dmadesc);
	if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX)
		dmactl |= BIT(DMA_REQUEST_OUTSTANDING);
	writel (dmactl, &dma->dmactl);

	/* erratum 0116 workaround part 3: pci arbiter away from net2280 */
	(void) readl (&ep->dev->pci->pcimstctl);

	writel(BIT(DMA_START), &dma->dmastat);

	if (!ep->is_in)
		stop_out_naking (ep);
}
815
/*
 * Begin DMA for a request on a channel known to be idle.  An OUT
 * endpoint that already holds a (possibly short) packet is drained
 * directly (erratum 0112 handling); otherwise the request is linked
 * as a descriptor and the queue is started via start_queue().
 */
static void start_dma (struct net2280_ep *ep, struct net2280_request *req)
{
	u32			tmp;
	struct net2280_dma_regs	__iomem *dma = ep->dma;

	/* FIXME can't use DMA for ZLPs */

	/* on this path we "know" there's no dma active (yet) */
	WARN_ON(readl(&dma->dmactl) & BIT(DMA_ENABLE));
	writel (0, &ep->dma->dmactl);

	/* previous OUT packet might have been short */
	if (!ep->is_in && ((tmp = readl (&ep->regs->ep_stat))
				& BIT(NAK_OUT_PACKETS)) != 0) {
		writel(BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT),
			&ep->regs->ep_stat);

		tmp = readl (&ep->regs->ep_avail);
		if (tmp) {
			writel (readl (&dma->dmastat), &dma->dmastat);

			/* transfer all/some fifo data */
			writel (req->req.dma, &dma->dmaaddr);
			tmp = min (tmp, req->req.length);

			/* dma irq, faking scatterlist status */
			req->td->dmacount = cpu_to_le32 (req->req.length - tmp);
			writel(BIT(DMA_DONE_INTERRUPT_ENABLE)
				| tmp, &dma->dmacount);
			req->td->dmadesc = 0;
			req->valid = 1;

			writel(BIT(DMA_ENABLE), &dma->dmactl);
			writel(BIT(DMA_START), &dma->dmastat);
			return;
		}
	}

	tmp = dmactl_default;

	/* force packet boundaries between dma requests, but prevent the
	 * controller from automagically writing a last "short" packet
	 * (zero length) unless the driver explicitly said to do that.
	 */
	if (ep->is_in) {
		if (likely ((req->req.length % ep->ep.maxpacket) != 0
				|| req->req.zero)) {
			tmp |= BIT(DMA_FIFO_VALIDATE);
			ep->in_fifo_validate = 1;
		} else
			ep->in_fifo_validate = 0;
	}

	/* init req->td, pointing to the current dummy */
	req->td->dmadesc = cpu_to_le32 (ep->td_dma);
	fill_dma_desc (ep, req, 1);

	if (!use_dma_chaining)
		req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN));

	start_queue (ep, tmp, req->td_dma);
}
878
adc82f77
RRD
/* re-enable the endpoint's channel and note that it is running */
static inline void resume_dma(struct net2280_ep *ep)
{
	writel(readl(&ep->dma->dmactl) | BIT(DMA_ENABLE), &ep->dma->dmactl);

	ep->dma_started = true;
}
885
/* disable the endpoint's channel, wait for it to idle, mark it stopped */
static inline void ep_stop_dma(struct net2280_ep *ep)
{
	writel(readl(&ep->dma->dmactl) & ~BIT(DMA_ENABLE), &ep->dma->dmactl);
	spin_stop_dma(ep->dma);

	ep->dma_started = false;
}
893
1da177e4
LT
/*
 * Append a request to the endpoint's descriptor chain by exchanging
 * its descriptor with the chain's trailing dummy, then filling it in
 * (and, if "valid", handing it to the controller).
 */
static inline void
queue_dma (struct net2280_ep *ep, struct net2280_request *req, int valid)
{
	struct net2280_dma	*end;
	dma_addr_t		tmp;

	/* swap new dummy for old, link; fill and maybe activate */
	end = ep->dummy;
	ep->dummy = req->td;
	req->td = end;

	tmp = ep->td_dma;
	ep->td_dma = req->td_dma;
	req->td_dma = tmp;

	/* old dummy now links forward to the new chain tail */
	end->dmadesc = cpu_to_le32 (ep->td_dma);

	fill_dma_desc (ep, req, valid);
}
913
/*
 * Retire a request: unlink it, record its final status, unmap its DMA
 * buffer, and call the gadget driver's completion callback with the
 * device lock dropped (ep->stopped blocks queue changes meanwhile).
 */
static void
done (struct net2280_ep *ep, struct net2280_request *req, int status)
{
	struct net2280		*dev;
	unsigned		stopped = ep->stopped;

	list_del_init (&req->queue);

	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;	/* keep an earlier fault code */

	dev = ep->dev;
	if (ep->dma)
		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);

	if (status && status != -ESHUTDOWN)
		VDEBUG (dev, "complete %s req %p stat %d len %u/%u\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	spin_unlock (&dev->lock);
	req->req.complete (&ep->ep, &req->req);
	spin_lock (&dev->lock);
	ep->stopped = stopped;
}
943
944/*-------------------------------------------------------------------------*/
945
946static int
55016f10 947net2280_queue (struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
1da177e4
LT
948{
949 struct net2280_request *req;
950 struct net2280_ep *ep;
951 struct net2280 *dev;
952 unsigned long flags;
953
954 /* we always require a cpu-view buffer, so that we can
955 * always use pio (as fallback or whatever).
956 */
957 req = container_of (_req, struct net2280_request, req);
958 if (!_req || !_req->complete || !_req->buf
959 || !list_empty (&req->queue))
960 return -EINVAL;
961 if (_req->length > (~0 & DMA_BYTE_COUNT_MASK))
962 return -EDOM;
963 ep = container_of (_ep, struct net2280_ep, ep);
964 if (!_ep || (!ep->desc && ep->num != 0))
965 return -EINVAL;
966 dev = ep->dev;
967 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
968 return -ESHUTDOWN;
969
970 /* FIXME implement PIO fallback for ZLPs with DMA */
971 if (ep->dma && _req->length == 0)
972 return -EOPNOTSUPP;
973
974 /* set up dma mapping in case the caller didn't */
ae4d7933
FB
975 if (ep->dma) {
976 int ret;
977
978 ret = usb_gadget_map_request(&dev->gadget, _req,
979 ep->is_in);
980 if (ret)
981 return ret;
1da177e4
LT
982 }
983
984#if 0
985 VDEBUG (dev, "%s queue req %p, len %d buf %p\n",
986 _ep->name, _req, _req->length, _req->buf);
987#endif
988
989 spin_lock_irqsave (&dev->lock, flags);
990
991 _req->status = -EINPROGRESS;
992 _req->actual = 0;
993
994 /* kickstart this i/o queue? */
995 if (list_empty (&ep->queue) && !ep->stopped) {
adc82f77
RRD
996 /* DMA request while EP halted */
997 if (ep->dma &&
3e76fdcb 998 (readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)) &&
c2db8a8a 999 (dev->pdev->vendor == PCI_VENDOR_ID_PLX)) {
adc82f77
RRD
1000 int valid = 1;
1001 if (ep->is_in) {
1002 int expect;
1003 expect = likely(req->req.zero ||
1004 ((req->req.length %
1005 ep->ep.maxpacket) != 0));
1006 if (expect != ep->in_fifo_validate)
1007 valid = 0;
1008 }
1009 queue_dma(ep, req, valid);
1010 }
1da177e4 1011 /* use DMA if the endpoint supports it, else pio */
adc82f77 1012 else if (ep->dma)
1da177e4
LT
1013 start_dma (ep, req);
1014 else {
1015 /* maybe there's no control data, just status ack */
1016 if (ep->num == 0 && _req->length == 0) {
1017 allow_status (ep);
1018 done (ep, req, 0);
1019 VDEBUG (dev, "%s status ack\n", ep->ep.name);
1020 goto done;
1021 }
1022
1023 /* PIO ... stuff the fifo, or unblock it. */
1024 if (ep->is_in)
1025 write_fifo (ep, _req);
1026 else if (list_empty (&ep->queue)) {
1027 u32 s;
1028
1029 /* OUT FIFO might have packet(s) buffered */
1030 s = readl (&ep->regs->ep_stat);
3e76fdcb 1031 if ((s & BIT(FIFO_EMPTY)) == 0) {
1da177e4
LT
1032 /* note: _req->short_not_ok is
1033 * ignored here since PIO _always_
1034 * stops queue advance here, and
1035 * _req->status doesn't change for
1036 * short reads (only _req->actual)
1037 */
1038 if (read_fifo (ep, req)) {
1039 done (ep, req, 0);
1040 if (ep->num == 0)
1041 allow_status (ep);
1042 /* don't queue it */
1043 req = NULL;
1044 } else
1045 s = readl (&ep->regs->ep_stat);
1046 }
1047
1048 /* don't NAK, let the fifo fill */
3e76fdcb
RRD
1049 if (req && (s & BIT(NAK_OUT_PACKETS)))
1050 writel(BIT(CLEAR_NAK_OUT_PACKETS),
1da177e4
LT
1051 &ep->regs->ep_rsp);
1052 }
1053 }
1054
1055 } else if (ep->dma) {
1056 int valid = 1;
1057
1058 if (ep->is_in) {
1059 int expect;
1060
1061 /* preventing magic zlps is per-engine state, not
1062 * per-transfer; irq logic must recover hiccups.
1063 */
1064 expect = likely (req->req.zero
1065 || (req->req.length % ep->ep.maxpacket) != 0);
1066 if (expect != ep->in_fifo_validate)
1067 valid = 0;
1068 }
1069 queue_dma (ep, req, valid);
1070
1071 } /* else the irq handler advances the queue. */
1072
1f26e28d 1073 ep->responded = 1;
1da177e4
LT
1074 if (req)
1075 list_add_tail (&req->queue, &ep->queue);
1076done:
1077 spin_unlock_irqrestore (&dev->lock, flags);
1078
1079 /* pci writes may still be posted */
1080 return 0;
1081}
1082
1083static inline void
1084dma_done (
1085 struct net2280_ep *ep,
1086 struct net2280_request *req,
1087 u32 dmacount,
1088 int status
1089)
1090{
1091 req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount);
1092 done (ep, req, status);
1093}
1094
1095static void restart_dma (struct net2280_ep *ep);
1096
1097static void scan_dma_completions (struct net2280_ep *ep)
1098{
1099 /* only look at descriptors that were "naturally" retired,
1100 * so fifo and list head state won't matter
1101 */
1102 while (!list_empty (&ep->queue)) {
1103 struct net2280_request *req;
1104 u32 tmp;
1105
1106 req = list_entry (ep->queue.next,
1107 struct net2280_request, queue);
1108 if (!req->valid)
1109 break;
1110 rmb ();
1111 tmp = le32_to_cpup (&req->td->dmacount);
3e76fdcb 1112 if ((tmp & BIT(VALID_BIT)) != 0)
1da177e4
LT
1113 break;
1114
1115 /* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
1116 * cases where DMA must be aborted; this code handles
1117 * all non-abort DMA completions.
1118 */
1119 if (unlikely (req->td->dmadesc == 0)) {
1120 /* paranoia */
1121 tmp = readl (&ep->dma->dmacount);
1122 if (tmp & DMA_BYTE_COUNT_MASK)
1123 break;
1124 /* single transfer mode */
1125 dma_done (ep, req, tmp, 0);
1126 break;
1127 } else if (!ep->is_in
1128 && (req->req.length % ep->ep.maxpacket) != 0) {
1129 tmp = readl (&ep->regs->ep_stat);
c2db8a8a 1130 if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX)
adc82f77 1131 return dma_done(ep, req, tmp, 0);
1da177e4
LT
1132
1133 /* AVOID TROUBLE HERE by not issuing short reads from
1134 * your gadget driver. That helps avoids errata 0121,
1135 * 0122, and 0124; not all cases trigger the warning.
1136 */
3e76fdcb 1137 if ((tmp & BIT(NAK_OUT_PACKETS)) == 0) {
b6c63937 1138 WARNING (ep->dev, "%s lost packet sync!\n",
1da177e4
LT
1139 ep->ep.name);
1140 req->req.status = -EOVERFLOW;
1141 } else if ((tmp = readl (&ep->regs->ep_avail)) != 0) {
1142 /* fifo gets flushed later */
1143 ep->out_overflow = 1;
1144 DEBUG (ep->dev, "%s dma, discard %d len %d\n",
1145 ep->ep.name, tmp,
1146 req->req.length);
1147 req->req.status = -EOVERFLOW;
1148 }
1149 }
1150 dma_done (ep, req, tmp, 0);
1151 }
1152}
1153
1154static void restart_dma (struct net2280_ep *ep)
1155{
1156 struct net2280_request *req;
1157 u32 dmactl = dmactl_default;
1158
1159 if (ep->stopped)
1160 return;
1161 req = list_entry (ep->queue.next, struct net2280_request, queue);
1162
1163 if (!use_dma_chaining) {
1164 start_dma (ep, req);
1165 return;
1166 }
1167
1168 /* the 2280 will be processing the queue unless queue hiccups after
1169 * the previous transfer:
1170 * IN: wanted automagic zlp, head doesn't (or vice versa)
1171 * DMA_FIFO_VALIDATE doesn't init from dma descriptors.
1172 * OUT: was "usb-short", we must restart.
1173 */
1174 if (ep->is_in && !req->valid) {
1175 struct net2280_request *entry, *prev = NULL;
1176 int reqmode, done = 0;
1177
1178 DEBUG (ep->dev, "%s dma hiccup td %p\n", ep->ep.name, req->td);
1179 ep->in_fifo_validate = likely (req->req.zero
1180 || (req->req.length % ep->ep.maxpacket) != 0);
1181 if (ep->in_fifo_validate)
3e76fdcb 1182 dmactl |= BIT(DMA_FIFO_VALIDATE);
1da177e4 1183 list_for_each_entry (entry, &ep->queue, queue) {
320f3459 1184 __le32 dmacount;
1da177e4
LT
1185
1186 if (entry == req)
1187 continue;
1188 dmacount = entry->td->dmacount;
1189 if (!done) {
1190 reqmode = likely (entry->req.zero
1191 || (entry->req.length
1192 % ep->ep.maxpacket) != 0);
1193 if (reqmode == ep->in_fifo_validate) {
1194 entry->valid = 1;
1195 dmacount |= valid_bit;
1196 entry->td->dmacount = dmacount;
1197 prev = entry;
1198 continue;
1199 } else {
1200 /* force a hiccup */
1201 prev->td->dmacount |= dma_done_ie;
1202 done = 1;
1203 }
1204 }
1205
1206 /* walk the rest of the queue so unlinks behave */
1207 entry->valid = 0;
1208 dmacount &= ~valid_bit;
1209 entry->td->dmacount = dmacount;
1210 prev = entry;
1211 }
1212 }
1213
1214 writel (0, &ep->dma->dmactl);
1215 start_queue (ep, dmactl, req->td_dma);
1216}
1217
adc82f77 1218static void abort_dma_228x(struct net2280_ep *ep)
1da177e4
LT
1219{
1220 /* abort the current transfer */
1221 if (likely (!list_empty (&ep->queue))) {
1222 /* FIXME work around errata 0121, 0122, 0124 */
3e76fdcb 1223 writel(BIT(DMA_ABORT), &ep->dma->dmastat);
1da177e4
LT
1224 spin_stop_dma (ep->dma);
1225 } else
1226 stop_dma (ep->dma);
1227 scan_dma_completions (ep);
1228}
1229
adc82f77
RRD
1230static void abort_dma_338x(struct net2280_ep *ep)
1231{
3e76fdcb 1232 writel(BIT(DMA_ABORT), &ep->dma->dmastat);
adc82f77
RRD
1233 spin_stop_dma(ep->dma);
1234}
1235
1236static void abort_dma(struct net2280_ep *ep)
1237{
c2db8a8a 1238 if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
adc82f77
RRD
1239 return abort_dma_228x(ep);
1240 return abort_dma_338x(ep);
1241}
1242
1da177e4
LT
1243/* dequeue ALL requests */
1244static void nuke (struct net2280_ep *ep)
1245{
1246 struct net2280_request *req;
1247
1248 /* called with spinlock held */
1249 ep->stopped = 1;
1250 if (ep->dma)
1251 abort_dma (ep);
1252 while (!list_empty (&ep->queue)) {
1253 req = list_entry (ep->queue.next,
1254 struct net2280_request,
1255 queue);
1256 done (ep, req, -ESHUTDOWN);
1257 }
1258}
1259
1260/* dequeue JUST ONE request */
1261static int net2280_dequeue (struct usb_ep *_ep, struct usb_request *_req)
1262{
1263 struct net2280_ep *ep;
1264 struct net2280_request *req;
1265 unsigned long flags;
1266 u32 dmactl;
1267 int stopped;
1268
1269 ep = container_of (_ep, struct net2280_ep, ep);
1270 if (!_ep || (!ep->desc && ep->num != 0) || !_req)
1271 return -EINVAL;
1272
1273 spin_lock_irqsave (&ep->dev->lock, flags);
1274 stopped = ep->stopped;
1275
1276 /* quiesce dma while we patch the queue */
1277 dmactl = 0;
1278 ep->stopped = 1;
1279 if (ep->dma) {
1280 dmactl = readl (&ep->dma->dmactl);
1281 /* WARNING erratum 0127 may kick in ... */
1282 stop_dma (ep->dma);
1283 scan_dma_completions (ep);
1284 }
1285
1286 /* make sure it's still queued on this endpoint */
1287 list_for_each_entry (req, &ep->queue, queue) {
1288 if (&req->req == _req)
1289 break;
1290 }
1291 if (&req->req != _req) {
1292 spin_unlock_irqrestore (&ep->dev->lock, flags);
1293 return -EINVAL;
1294 }
1295
1296 /* queue head may be partially complete. */
1297 if (ep->queue.next == &req->queue) {
1298 if (ep->dma) {
1299 DEBUG (ep->dev, "unlink (%s) dma\n", _ep->name);
1300 _req->status = -ECONNRESET;
1301 abort_dma (ep);
1302 if (likely (ep->queue.next == &req->queue)) {
1303 // NOTE: misreports single-transfer mode
1304 req->td->dmacount = 0; /* invalidate */
1305 dma_done (ep, req,
1306 readl (&ep->dma->dmacount),
1307 -ECONNRESET);
1308 }
1309 } else {
1310 DEBUG (ep->dev, "unlink (%s) pio\n", _ep->name);
1311 done (ep, req, -ECONNRESET);
1312 }
1313 req = NULL;
1314
1315 /* patch up hardware chaining data */
1316 } else if (ep->dma && use_dma_chaining) {
1317 if (req->queue.prev == ep->queue.next) {
1318 writel (le32_to_cpu (req->td->dmadesc),
1319 &ep->dma->dmadesc);
1320 if (req->td->dmacount & dma_done_ie)
1321 writel (readl (&ep->dma->dmacount)
320f3459 1322 | le32_to_cpu(dma_done_ie),
1da177e4
LT
1323 &ep->dma->dmacount);
1324 } else {
1325 struct net2280_request *prev;
1326
1327 prev = list_entry (req->queue.prev,
1328 struct net2280_request, queue);
1329 prev->td->dmadesc = req->td->dmadesc;
1330 if (req->td->dmacount & dma_done_ie)
1331 prev->td->dmacount |= dma_done_ie;
1332 }
1333 }
1334
1335 if (req)
1336 done (ep, req, -ECONNRESET);
1337 ep->stopped = stopped;
1338
1339 if (ep->dma) {
1340 /* turn off dma on inactive queues */
1341 if (list_empty (&ep->queue))
1342 stop_dma (ep->dma);
1343 else if (!ep->stopped) {
1344 /* resume current request, or start new one */
1345 if (req)
1346 writel (dmactl, &ep->dma->dmactl);
1347 else
1348 start_dma (ep, list_entry (ep->queue.next,
1349 struct net2280_request, queue));
1350 }
1351 }
1352
1353 spin_unlock_irqrestore (&ep->dev->lock, flags);
1354 return 0;
1355}
1356
1357/*-------------------------------------------------------------------------*/
1358
1359static int net2280_fifo_status (struct usb_ep *_ep);
1360
1361static int
8066134f 1362net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
1da177e4
LT
1363{
1364 struct net2280_ep *ep;
1365 unsigned long flags;
1366 int retval = 0;
1367
1368 ep = container_of (_ep, struct net2280_ep, ep);
1369 if (!_ep || (!ep->desc && ep->num != 0))
1370 return -EINVAL;
1371 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1372 return -ESHUTDOWN;
1373 if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03)
1374 == USB_ENDPOINT_XFER_ISOC)
1375 return -EINVAL;
1376
1377 spin_lock_irqsave (&ep->dev->lock, flags);
1378 if (!list_empty (&ep->queue))
1379 retval = -EAGAIN;
1380 else if (ep->is_in && value && net2280_fifo_status (_ep) != 0)
1381 retval = -EAGAIN;
1382 else {
8066134f
AS
1383 VDEBUG (ep->dev, "%s %s %s\n", _ep->name,
1384 value ? "set" : "clear",
1385 wedged ? "wedge" : "halt");
1da177e4
LT
1386 /* set/clear, then synch memory views with the device */
1387 if (value) {
1388 if (ep->num == 0)
1389 ep->dev->protocol_stall = 1;
1390 else
1391 set_halt (ep);
8066134f
AS
1392 if (wedged)
1393 ep->wedged = 1;
1394 } else {
1da177e4 1395 clear_halt (ep);
c2db8a8a 1396 if (ep->dev->pdev->vendor == PCI_VENDOR_ID_PLX &&
adc82f77
RRD
1397 !list_empty(&ep->queue) && ep->td_dma)
1398 restart_dma(ep);
8066134f
AS
1399 ep->wedged = 0;
1400 }
1da177e4
LT
1401 (void) readl (&ep->regs->ep_rsp);
1402 }
1403 spin_unlock_irqrestore (&ep->dev->lock, flags);
1404
1405 return retval;
1406}
1407
8066134f
AS
/* usb_ep_ops.set_halt: plain (non-wedged) halt set/clear. */
static int
net2280_set_halt(struct usb_ep *_ep, int value)
{
	return net2280_set_halt_and_wedge(_ep, value, 0);
}
1413
1414static int
1415net2280_set_wedge(struct usb_ep *_ep)
1416{
1417 if (!_ep || _ep->name == ep0name)
1418 return -EINVAL;
1419 return net2280_set_halt_and_wedge(_ep, 1, 1);
1420}
1421
1da177e4
LT
1422static int
1423net2280_fifo_status (struct usb_ep *_ep)
1424{
1425 struct net2280_ep *ep;
1426 u32 avail;
1427
1428 ep = container_of (_ep, struct net2280_ep, ep);
1429 if (!_ep || (!ep->desc && ep->num != 0))
1430 return -ENODEV;
1431 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1432 return -ESHUTDOWN;
1433
3e76fdcb 1434 avail = readl(&ep->regs->ep_avail) & (BIT(12) - 1);
1da177e4
LT
1435 if (avail > ep->fifo_size)
1436 return -EOVERFLOW;
1437 if (ep->is_in)
1438 avail = ep->fifo_size - avail;
1439 return avail;
1440}
1441
1442static void
1443net2280_fifo_flush (struct usb_ep *_ep)
1444{
1445 struct net2280_ep *ep;
1446
1447 ep = container_of (_ep, struct net2280_ep, ep);
1448 if (!_ep || (!ep->desc && ep->num != 0))
1449 return;
1450 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1451 return;
1452
3e76fdcb 1453 writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
1da177e4
LT
1454 (void) readl (&ep->regs->ep_rsp);
1455}
1456
901b3d75 1457static const struct usb_ep_ops net2280_ep_ops = {
1da177e4
LT
1458 .enable = net2280_enable,
1459 .disable = net2280_disable,
1460
1461 .alloc_request = net2280_alloc_request,
1462 .free_request = net2280_free_request,
1463
1da177e4
LT
1464 .queue = net2280_queue,
1465 .dequeue = net2280_dequeue,
1466
1467 .set_halt = net2280_set_halt,
8066134f 1468 .set_wedge = net2280_set_wedge,
1da177e4
LT
1469 .fifo_status = net2280_fifo_status,
1470 .fifo_flush = net2280_fifo_flush,
1471};
1472
1473/*-------------------------------------------------------------------------*/
1474
1475static int net2280_get_frame (struct usb_gadget *_gadget)
1476{
1477 struct net2280 *dev;
1478 unsigned long flags;
1479 u16 retval;
1480
1481 if (!_gadget)
1482 return -ENODEV;
1483 dev = container_of (_gadget, struct net2280, gadget);
1484 spin_lock_irqsave (&dev->lock, flags);
1485 retval = get_idx_reg (dev->regs, REG_FRAME) & 0x03ff;
1486 spin_unlock_irqrestore (&dev->lock, flags);
1487 return retval;
1488}
1489
1490static int net2280_wakeup (struct usb_gadget *_gadget)
1491{
1492 struct net2280 *dev;
1493 u32 tmp;
1494 unsigned long flags;
1495
1496 if (!_gadget)
1497 return 0;
1498 dev = container_of (_gadget, struct net2280, gadget);
1499
1500 spin_lock_irqsave (&dev->lock, flags);
1501 tmp = readl (&dev->usb->usbctl);
3e76fdcb
RRD
1502 if (tmp & BIT(DEVICE_REMOTE_WAKEUP_ENABLE))
1503 writel(BIT(GENERATE_RESUME), &dev->usb->usbstat);
1da177e4
LT
1504 spin_unlock_irqrestore (&dev->lock, flags);
1505
1506 /* pci writes may still be posted */
1507 return 0;
1508}
1509
1510static int net2280_set_selfpowered (struct usb_gadget *_gadget, int value)
1511{
1512 struct net2280 *dev;
1513 u32 tmp;
1514 unsigned long flags;
1515
1516 if (!_gadget)
1517 return 0;
1518 dev = container_of (_gadget, struct net2280, gadget);
1519
1520 spin_lock_irqsave (&dev->lock, flags);
1521 tmp = readl (&dev->usb->usbctl);
adc82f77 1522 if (value) {
3e76fdcb 1523 tmp |= BIT(SELF_POWERED_STATUS);
adc82f77
RRD
1524 dev->selfpowered = 1;
1525 } else {
3e76fdcb 1526 tmp &= ~BIT(SELF_POWERED_STATUS);
adc82f77
RRD
1527 dev->selfpowered = 0;
1528 }
1da177e4
LT
1529 writel (tmp, &dev->usb->usbctl);
1530 spin_unlock_irqrestore (&dev->lock, flags);
1531
1532 return 0;
1533}
1534
1535static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
1536{
1537 struct net2280 *dev;
1538 u32 tmp;
1539 unsigned long flags;
1540
1541 if (!_gadget)
1542 return -ENODEV;
1543 dev = container_of (_gadget, struct net2280, gadget);
1544
1545 spin_lock_irqsave (&dev->lock, flags);
1546 tmp = readl (&dev->usb->usbctl);
1547 dev->softconnect = (is_on != 0);
1548 if (is_on)
3e76fdcb 1549 tmp |= BIT(USB_DETECT_ENABLE);
1da177e4 1550 else
3e76fdcb 1551 tmp &= ~BIT(USB_DETECT_ENABLE);
1da177e4
LT
1552 writel (tmp, &dev->usb->usbctl);
1553 spin_unlock_irqrestore (&dev->lock, flags);
1554
1555 return 0;
1556}
1557
4cf5e00b
FB
1558static int net2280_start(struct usb_gadget *_gadget,
1559 struct usb_gadget_driver *driver);
1560static int net2280_stop(struct usb_gadget *_gadget,
1561 struct usb_gadget_driver *driver);
0f91349b 1562
1da177e4
LT
1563static const struct usb_gadget_ops net2280_ops = {
1564 .get_frame = net2280_get_frame,
1565 .wakeup = net2280_wakeup,
1566 .set_selfpowered = net2280_set_selfpowered,
1567 .pullup = net2280_pullup,
4cf5e00b
FB
1568 .udc_start = net2280_start,
1569 .udc_stop = net2280_stop,
1da177e4
LT
1570};
1571
1572/*-------------------------------------------------------------------------*/
1573
1574#ifdef CONFIG_USB_GADGET_DEBUG_FILES
1575
1576/* FIXME move these into procfs, and use seq_file.
1577 * Sysfs _still_ doesn't behave for arbitrarily sized files,
1578 * and also doesn't help products using this with 2.4 kernels.
1579 */
1580
1581/* "function" sysfs attribute */
ce26bd23
GKH
1582static ssize_t function_show(struct device *_dev, struct device_attribute *attr,
1583 char *buf)
1da177e4
LT
1584{
1585 struct net2280 *dev = dev_get_drvdata (_dev);
1586
1587 if (!dev->driver
1588 || !dev->driver->function
1589 || strlen (dev->driver->function) > PAGE_SIZE)
1590 return 0;
1591 return scnprintf (buf, PAGE_SIZE, "%s\n", dev->driver->function);
1592}
ce26bd23 1593static DEVICE_ATTR_RO(function);
1da177e4 1594
ce26bd23
GKH
1595static ssize_t registers_show(struct device *_dev,
1596 struct device_attribute *attr, char *buf)
1da177e4
LT
1597{
1598 struct net2280 *dev;
1599 char *next;
1600 unsigned size, t;
1601 unsigned long flags;
1602 int i;
1603 u32 t1, t2;
30e69598 1604 const char *s;
1da177e4
LT
1605
1606 dev = dev_get_drvdata (_dev);
1607 next = buf;
1608 size = PAGE_SIZE;
1609 spin_lock_irqsave (&dev->lock, flags);
1610
1611 if (dev->driver)
1612 s = dev->driver->driver.name;
1613 else
1614 s = "(none)";
1615
1616 /* Main Control Registers */
1617 t = scnprintf (next, size, "%s version " DRIVER_VERSION
1618 ", chiprev %04x, dma %s\n\n"
1619 "devinit %03x fifoctl %08x gadget '%s'\n"
1620 "pci irqenb0 %02x irqenb1 %08x "
1621 "irqstat0 %04x irqstat1 %08x\n",
1622 driver_name, dev->chiprev,
1623 use_dma
1624 ? (use_dma_chaining ? "chaining" : "enabled")
1625 : "disabled",
1626 readl (&dev->regs->devinit),
1627 readl (&dev->regs->fifoctl),
1628 s,
1629 readl (&dev->regs->pciirqenb0),
1630 readl (&dev->regs->pciirqenb1),
1631 readl (&dev->regs->irqstat0),
1632 readl (&dev->regs->irqstat1));
1633 size -= t;
1634 next += t;
1635
1636 /* USB Control Registers */
1637 t1 = readl (&dev->usb->usbctl);
1638 t2 = readl (&dev->usb->usbstat);
3e76fdcb
RRD
1639 if (t1 & BIT(VBUS_PIN)) {
1640 if (t2 & BIT(HIGH_SPEED))
1da177e4
LT
1641 s = "high speed";
1642 else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1643 s = "powered";
1644 else
1645 s = "full speed";
1646 /* full speed bit (6) not working?? */
1647 } else
1648 s = "not attached";
1649 t = scnprintf (next, size,
1650 "stdrsp %08x usbctl %08x usbstat %08x "
1651 "addr 0x%02x (%s)\n",
1652 readl (&dev->usb->stdrsp), t1, t2,
1653 readl (&dev->usb->ouraddr), s);
1654 size -= t;
1655 next += t;
1656
1657 /* PCI Master Control Registers */
1658
1659 /* DMA Control Registers */
1660
1661 /* Configurable EP Control Registers */
adc82f77 1662 for (i = 0; i < dev->n_ep; i++) {
1da177e4
LT
1663 struct net2280_ep *ep;
1664
1665 ep = &dev->ep [i];
1666 if (i && !ep->desc)
1667 continue;
1668
adc82f77 1669 t1 = readl(&ep->cfg->ep_cfg);
1da177e4
LT
1670 t2 = readl (&ep->regs->ep_rsp) & 0xff;
1671 t = scnprintf (next, size,
1672 "\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s"
1673 "irqenb %02x\n",
1674 ep->ep.name, t1, t2,
3e76fdcb 1675 (t2 & BIT(CLEAR_NAK_OUT_PACKETS))
1da177e4 1676 ? "NAK " : "",
3e76fdcb 1677 (t2 & BIT(CLEAR_EP_HIDE_STATUS_PHASE))
1da177e4 1678 ? "hide " : "",
3e76fdcb 1679 (t2 & BIT(CLEAR_EP_FORCE_CRC_ERROR))
1da177e4 1680 ? "CRC " : "",
3e76fdcb 1681 (t2 & BIT(CLEAR_INTERRUPT_MODE))
1da177e4 1682 ? "interrupt " : "",
3e76fdcb 1683 (t2 & BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE))
1da177e4 1684 ? "status " : "",
3e76fdcb 1685 (t2 & BIT(CLEAR_NAK_OUT_PACKETS_MODE))
1da177e4 1686 ? "NAKmode " : "",
3e76fdcb 1687 (t2 & BIT(CLEAR_ENDPOINT_TOGGLE))
1da177e4 1688 ? "DATA1 " : "DATA0 ",
3e76fdcb 1689 (t2 & BIT(CLEAR_ENDPOINT_HALT))
1da177e4
LT
1690 ? "HALT " : "",
1691 readl (&ep->regs->ep_irqenb));
1692 size -= t;
1693 next += t;
1694
1695 t = scnprintf (next, size,
1696 "\tstat %08x avail %04x "
1697 "(ep%d%s-%s)%s\n",
1698 readl (&ep->regs->ep_stat),
1699 readl (&ep->regs->ep_avail),
1700 t1 & 0x0f, DIR_STRING (t1),
1701 type_string (t1 >> 8),
1702 ep->stopped ? "*" : "");
1703 size -= t;
1704 next += t;
1705
1706 if (!ep->dma)
1707 continue;
1708
1709 t = scnprintf (next, size,
1710 " dma\tctl %08x stat %08x count %08x\n"
1711 "\taddr %08x desc %08x\n",
1712 readl (&ep->dma->dmactl),
1713 readl (&ep->dma->dmastat),
1714 readl (&ep->dma->dmacount),
1715 readl (&ep->dma->dmaaddr),
1716 readl (&ep->dma->dmadesc));
1717 size -= t;
1718 next += t;
1719
1720 }
1721
1722 /* Indexed Registers */
901b3d75 1723 // none yet
1da177e4
LT
1724
1725 /* Statistics */
1726 t = scnprintf (next, size, "\nirqs: ");
1727 size -= t;
1728 next += t;
adc82f77 1729 for (i = 0; i < dev->n_ep; i++) {
1da177e4
LT
1730 struct net2280_ep *ep;
1731
1732 ep = &dev->ep [i];
1733 if (i && !ep->irqs)
1734 continue;
1735 t = scnprintf (next, size, " %s/%lu", ep->ep.name, ep->irqs);
1736 size -= t;
1737 next += t;
1738
1739 }
1740 t = scnprintf (next, size, "\n");
1741 size -= t;
1742 next += t;
1743
1744 spin_unlock_irqrestore (&dev->lock, flags);
1745
1746 return PAGE_SIZE - size;
1747}
ce26bd23 1748static DEVICE_ATTR_RO(registers);
1da177e4 1749
ce26bd23
GKH
1750static ssize_t queues_show(struct device *_dev, struct device_attribute *attr,
1751 char *buf)
1da177e4
LT
1752{
1753 struct net2280 *dev;
1754 char *next;
1755 unsigned size;
1756 unsigned long flags;
1757 int i;
1758
1759 dev = dev_get_drvdata (_dev);
1760 next = buf;
1761 size = PAGE_SIZE;
1762 spin_lock_irqsave (&dev->lock, flags);
1763
adc82f77 1764 for (i = 0; i < dev->n_ep; i++) {
1da177e4
LT
1765 struct net2280_ep *ep = &dev->ep [i];
1766 struct net2280_request *req;
1767 int t;
1768
1769 if (i != 0) {
1770 const struct usb_endpoint_descriptor *d;
1771
1772 d = ep->desc;
1773 if (!d)
1774 continue;
1775 t = d->bEndpointAddress;
1776 t = scnprintf (next, size,
1777 "\n%s (ep%d%s-%s) max %04x %s fifo %d\n",
1778 ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
1779 (t & USB_DIR_IN) ? "in" : "out",
a27f37a1 1780 type_string(d->bmAttributes),
29cc8897 1781 usb_endpoint_maxp (d) & 0x1fff,
1da177e4
LT
1782 ep->dma ? "dma" : "pio", ep->fifo_size
1783 );
1784 } else /* ep0 should only have one transfer queued */
1785 t = scnprintf (next, size, "ep0 max 64 pio %s\n",
1786 ep->is_in ? "in" : "out");
1787 if (t <= 0 || t > size)
1788 goto done;
1789 size -= t;
1790 next += t;
1791
1792 if (list_empty (&ep->queue)) {
1793 t = scnprintf (next, size, "\t(nothing queued)\n");
1794 if (t <= 0 || t > size)
1795 goto done;
1796 size -= t;
1797 next += t;
1798 continue;
1799 }
1800 list_for_each_entry (req, &ep->queue, queue) {
1801 if (ep->dma && req->td_dma == readl (&ep->dma->dmadesc))
1802 t = scnprintf (next, size,
1803 "\treq %p len %d/%d "
1804 "buf %p (dmacount %08x)\n",
1805 &req->req, req->req.actual,
1806 req->req.length, req->req.buf,
1807 readl (&ep->dma->dmacount));
1808 else
1809 t = scnprintf (next, size,
1810 "\treq %p len %d/%d buf %p\n",
1811 &req->req, req->req.actual,
1812 req->req.length, req->req.buf);
1813 if (t <= 0 || t > size)
1814 goto done;
1815 size -= t;
1816 next += t;
1817
1818 if (ep->dma) {
1819 struct net2280_dma *td;
1820
1821 td = req->td;
1822 t = scnprintf (next, size, "\t td %08x "
1823 " count %08x buf %08x desc %08x\n",
1824 (u32) req->td_dma,
1825 le32_to_cpu (td->dmacount),
1826 le32_to_cpu (td->dmaaddr),
1827 le32_to_cpu (td->dmadesc));
1828 if (t <= 0 || t > size)
1829 goto done;
1830 size -= t;
1831 next += t;
1832 }
1833 }
1834 }
1835
1836done:
1837 spin_unlock_irqrestore (&dev->lock, flags);
1838 return PAGE_SIZE - size;
1839}
ce26bd23 1840static DEVICE_ATTR_RO(queues);
1da177e4
LT
1841
1842
1843#else
1844
9950421c
LT
1845#define device_create_file(a,b) (0)
1846#define device_remove_file(a,b) do { } while (0)
1da177e4
LT
1847
1848#endif
1849
1850/*-------------------------------------------------------------------------*/
1851
1852/* another driver-specific mode might be a request type doing dma
1853 * to/from another device fifo instead of to/from memory.
1854 */
1855
1856static void set_fifo_mode (struct net2280 *dev, int mode)
1857{
1858 /* keeping high bits preserves BAR2 */
1859 writel ((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl);
1860
1861 /* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */
1862 INIT_LIST_HEAD (&dev->gadget.ep_list);
1863 list_add_tail (&dev->ep [1].ep.ep_list, &dev->gadget.ep_list);
1864 list_add_tail (&dev->ep [2].ep.ep_list, &dev->gadget.ep_list);
1865 switch (mode) {
1866 case 0:
1867 list_add_tail (&dev->ep [3].ep.ep_list, &dev->gadget.ep_list);
1868 list_add_tail (&dev->ep [4].ep.ep_list, &dev->gadget.ep_list);
1869 dev->ep [1].fifo_size = dev->ep [2].fifo_size = 1024;
1870 break;
1871 case 1:
1872 dev->ep [1].fifo_size = dev->ep [2].fifo_size = 2048;
1873 break;
1874 case 2:
1875 list_add_tail (&dev->ep [3].ep.ep_list, &dev->gadget.ep_list);
1876 dev->ep [1].fifo_size = 2048;
1877 dev->ep [2].fifo_size = 1024;
1878 break;
1879 }
1880 /* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */
1881 list_add_tail (&dev->ep [5].ep.ep_list, &dev->gadget.ep_list);
1882 list_add_tail (&dev->ep [6].ep.ep_list, &dev->gadget.ep_list);
1883}
1884
adc82f77
RRD
1885static void defect7374_disable_data_eps(struct net2280 *dev)
1886{
1887 /*
1888 * For Defect 7374, disable data EPs (and more):
1889 * - This phase undoes the earlier phase of the Defect 7374 workaround,
1890 * returing ep regs back to normal.
1891 */
1892 struct net2280_ep *ep;
1893 int i;
1894 unsigned char ep_sel;
1895 u32 tmp_reg;
1896
1897 for (i = 1; i < 5; i++) {
1898 ep = &dev->ep[i];
1899 writel(0, &ep->cfg->ep_cfg);
1900 }
1901
1902 /* CSROUT, CSRIN, PCIOUT, PCIIN, STATIN, RCIN */
1903 for (i = 0; i < 6; i++)
1904 writel(0, &dev->dep[i].dep_cfg);
1905
1906 for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
1907 /* Select an endpoint for subsequent operations: */
1908 tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
1909 writel(((tmp_reg & ~0x1f) | ep_sel), &dev->plregs->pl_ep_ctrl);
1910
1911 if (ep_sel < 2 || (ep_sel > 9 && ep_sel < 14) ||
1912 ep_sel == 18 || ep_sel == 20)
1913 continue;
1914
1915 /* Change settings on some selected endpoints */
1916 tmp_reg = readl(&dev->plregs->pl_ep_cfg_4);
3e76fdcb 1917 tmp_reg &= ~BIT(NON_CTRL_IN_TOLERATE_BAD_DIR);
adc82f77
RRD
1918 writel(tmp_reg, &dev->plregs->pl_ep_cfg_4);
1919 tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
3e76fdcb 1920 tmp_reg |= BIT(EP_INITIALIZED);
adc82f77
RRD
1921 writel(tmp_reg, &dev->plregs->pl_ep_ctrl);
1922 }
1923}
1924
1925static void defect7374_enable_data_eps_zero(struct net2280 *dev)
1926{
1927 u32 tmp = 0, tmp_reg;
1928 u32 fsmvalue, scratch;
1929 int i;
1930 unsigned char ep_sel;
1931
1932 scratch = get_idx_reg(dev->regs, SCRATCH);
1933 fsmvalue = scratch & (0xf << DEFECT7374_FSM_FIELD);
1934 scratch &= ~(0xf << DEFECT7374_FSM_FIELD);
1935
1936 /*See if firmware needs to set up for workaround*/
1937 if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ) {
1938 WARNING(dev, "Operate Defect 7374 workaround soft this time");
1939 WARNING(dev, "It will operate on cold-reboot and SS connect");
1940
1941 /*GPEPs:*/
3e76fdcb 1942 tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_DIRECTION) |
adc82f77
RRD
1943 (2 << OUT_ENDPOINT_TYPE) | (2 << IN_ENDPOINT_TYPE) |
1944 ((dev->enhanced_mode) ?
3e76fdcb
RRD
1945 BIT(OUT_ENDPOINT_ENABLE) : BIT(ENDPOINT_ENABLE)) |
1946 BIT(IN_ENDPOINT_ENABLE));
adc82f77
RRD
1947
1948 for (i = 1; i < 5; i++)
1949 writel(tmp, &dev->ep[i].cfg->ep_cfg);
1950
1951 /* CSRIN, PCIIN, STATIN, RCIN*/
3e76fdcb 1952 tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_ENABLE));
adc82f77
RRD
1953 writel(tmp, &dev->dep[1].dep_cfg);
1954 writel(tmp, &dev->dep[3].dep_cfg);
1955 writel(tmp, &dev->dep[4].dep_cfg);
1956 writel(tmp, &dev->dep[5].dep_cfg);
1957
1958 /*Implemented for development and debug.
1959 * Can be refined/tuned later.*/
1960 for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
1961 /* Select an endpoint for subsequent operations: */
1962 tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
1963 writel(((tmp_reg & ~0x1f) | ep_sel),
1964 &dev->plregs->pl_ep_ctrl);
1965
1966 if (ep_sel == 1) {
1967 tmp =
1968 (readl(&dev->plregs->pl_ep_ctrl) |
3e76fdcb 1969 BIT(CLEAR_ACK_ERROR_CODE) | 0);
adc82f77
RRD
1970 writel(tmp, &dev->plregs->pl_ep_ctrl);
1971 continue;
1972 }
1973
1974 if (ep_sel == 0 || (ep_sel > 9 && ep_sel < 14) ||
1975 ep_sel == 18 || ep_sel == 20)
1976 continue;
1977
1978 tmp = (readl(&dev->plregs->pl_ep_cfg_4) |
3e76fdcb 1979 BIT(NON_CTRL_IN_TOLERATE_BAD_DIR) | 0);
adc82f77
RRD
1980 writel(tmp, &dev->plregs->pl_ep_cfg_4);
1981
1982 tmp = readl(&dev->plregs->pl_ep_ctrl) &
3e76fdcb 1983 ~BIT(EP_INITIALIZED);
adc82f77
RRD
1984 writel(tmp, &dev->plregs->pl_ep_ctrl);
1985
1986 }
1987
1988 /* Set FSM to focus on the first Control Read:
1989 * - Tip: Connection speed is known upon the first
1990 * setup request.*/
1991 scratch |= DEFECT7374_FSM_WAITING_FOR_CONTROL_READ;
1992 set_idx_reg(dev->regs, SCRATCH, scratch);
1993
1994 } else{
1995 WARNING(dev, "Defect 7374 workaround soft will NOT operate");
1996 WARNING(dev, "It will operate on cold-reboot and SS connect");
1997 }
1998}
1999
1da177e4
LT
2000/* keeping it simple:
2001 * - one bus driver, initted first;
2002 * - one function driver, initted second
2003 *
2004 * most of the work to support multiple net2280 controllers would
2005 * be to associate this gadget driver (yes?) with all of them, or
2006 * perhaps to bind specific drivers to specific devices.
2007 */
2008
adc82f77 2009static void usb_reset_228x(struct net2280 *dev)
1da177e4
LT
2010{
2011 u32 tmp;
2012
2013 dev->gadget.speed = USB_SPEED_UNKNOWN;
2014 (void) readl (&dev->usb->usbctl);
2015
2016 net2280_led_init (dev);
2017
2018 /* disable automatic responses, and irqs */
2019 writel (0, &dev->usb->stdrsp);
2020 writel (0, &dev->regs->pciirqenb0);
2021 writel (0, &dev->regs->pciirqenb1);
2022
2023 /* clear old dma and irq state */
2024 for (tmp = 0; tmp < 4; tmp++) {
adc82f77 2025 struct net2280_ep *ep = &dev->ep[tmp + 1];
1da177e4 2026 if (ep->dma)
adc82f77 2027 abort_dma(ep);
1da177e4 2028 }
adc82f77 2029
1da177e4 2030 writel (~0, &dev->regs->irqstat0),
3e76fdcb 2031 writel(~(u32)BIT(SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1),
1da177e4
LT
2032
2033 /* reset, and enable pci */
3e76fdcb
RRD
2034 tmp = readl(&dev->regs->devinit) |
2035 BIT(PCI_ENABLE) |
2036 BIT(FIFO_SOFT_RESET) |
2037 BIT(USB_SOFT_RESET) |
2038 BIT(M8051_RESET);
1da177e4
LT
2039 writel (tmp, &dev->regs->devinit);
2040
2041 /* standard fifo and endpoint allocations */
2042 set_fifo_mode (dev, (fifo_mode <= 2) ? fifo_mode : 0);
2043}
2044
adc82f77
RRD
/*
 * Reset path for 338x-family parts.  Unlike the 228x path, most of the
 * destructive steps are gated on the Defect 7374 workaround FSM kept in
 * the indexed SCRATCH register: until the workaround has completed
 * (FSM == SS_CONTROL_READ), the chip state set up for the workaround
 * must not be torn down.
 */
static void usb_reset_338x(struct net2280 *dev)
{
	u32 tmp;
	u32 fsmvalue;

	dev->gadget.speed = USB_SPEED_UNKNOWN;
	(void)readl(&dev->usb->usbctl);

	net2280_led_init(dev);

	/* current Defect 7374 workaround FSM state (SCRATCH[7:4]) */
	fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
				(0xf << DEFECT7374_FSM_FIELD);

	/* See if firmware needs to set up for workaround: */
	if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ) {
		INFO(dev, "%s: Defect 7374 FsmValue 0x%08x\n", __func__,
		     fsmvalue);
	} else {
		/* disable automatic responses, and irqs */
		writel(0, &dev->usb->stdrsp);
		writel(0, &dev->regs->pciirqenb0);
		writel(0, &dev->regs->pciirqenb1);
	}

	/* clear old dma and irq state */
	for (tmp = 0; tmp < 4; tmp++) {
		struct net2280_ep *ep = &dev->ep[tmp + 1];

		if (ep->dma)
			abort_dma(ep);
	}

	writel(~0, &dev->regs->irqstat0), writel(~0, &dev->regs->irqstat1);

	if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ) {
		/* reset, and enable pci */
		tmp = readl(&dev->regs->devinit) |
		    BIT(PCI_ENABLE) |
		    BIT(FIFO_SOFT_RESET) |
		    BIT(USB_SOFT_RESET) |
		    BIT(M8051_RESET);

		writel(tmp, &dev->regs->devinit);
	}

	/* always ep-{1,2,3,4} ... maybe not ep-3 or ep-4 */
	INIT_LIST_HEAD(&dev->gadget.ep_list);

	/* rebuild the gadget's endpoint list from ep[1..n_ep-1] */
	for (tmp = 1; tmp < dev->n_ep; tmp++)
		list_add_tail(&dev->ep[tmp].ep.ep_list, &dev->gadget.ep_list);

}
2097
2098static void usb_reset(struct net2280 *dev)
2099{
c2db8a8a 2100 if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
adc82f77
RRD
2101 return usb_reset_228x(dev);
2102 return usb_reset_338x(dev);
2103}
2104
/*
 * (Re)build the software endpoint state for a 228x part: names,
 * register pointers, FIFO sizes, DMA channel assignment, and the ep0
 * gadget hookup.  Called after usb_reset_228x() has quiesced the chip.
 */
static void usb_reinit_228x(struct net2280 *dev)
{
	u32 tmp;
	int init_dma;

	/* use_dma changes are ignored till next device re-init */
	init_dma = use_dma;

	/* basic endpoint init */
	for (tmp = 0; tmp < 7; tmp++) {
		struct net2280_ep *ep = &dev->ep [tmp];

		ep->ep.name = ep_name [tmp];
		ep->dev = dev;
		ep->num = tmp;

		/* ep-a..ep-d (1..4) have 1KB FIFOs and a DMA channel;
		 * ep0 and ep-e/ep-f (5,6) are 64-byte PIO-only
		 */
		if (tmp > 0 && tmp <= 4) {
			ep->fifo_size = 1024;
			if (init_dma)
				ep->dma = &dev->dma [tmp - 1];
		} else
			ep->fifo_size = 64;
		ep->regs = &dev->epregs [tmp];
		/* on 228x, cfg and regs alias the same register block */
		ep->cfg = &dev->epregs[tmp];
		ep_reset_228x(dev->regs, ep);
	}
	usb_ep_set_maxpacket_limit(&dev->ep [0].ep, 64);
	usb_ep_set_maxpacket_limit(&dev->ep [5].ep, 64);
	usb_ep_set_maxpacket_limit(&dev->ep [6].ep, 64);

	dev->gadget.ep0 = &dev->ep [0].ep;
	dev->ep [0].stopped = 0;
	INIT_LIST_HEAD (&dev->gadget.ep0->ep_list);

	/* we want to prevent lowlevel/insecure access from the USB host,
	 * but erratum 0119 means this enable bit is ignored
	 */
	for (tmp = 0; tmp < 5; tmp++)
		writel (EP_DONTUSE, &dev->dep [tmp].dep_cfg);
}
2145
adc82f77
RRD
/*
 * (Re)build endpoint state for a 338x part and apply the chip's
 * link-layer errata workarounds (LFPS timers, hot-reset TS2 counters,
 * recovery-idle chicken bit), then park the dedicated endpoints.
 */
static void usb_reinit_338x(struct net2280 *dev)
{
	int init_dma;
	int i;
	u32 tmp, val;
	u32 fsmvalue;
	/* enhanced mode maps logical eps onto shared register blocks:
	 * ne[] picks the config block, ep_reg_addr[] the 0xC0 offset
	 * for the second ep sharing that block.
	 */
	static const u32 ne[9] = { 0, 1, 2, 3, 4, 1, 2, 3, 4 };
	static const u32 ep_reg_addr[9] = { 0x00, 0xC0, 0x00, 0xC0, 0x00,
					0x00, 0xC0, 0x00, 0xC0 };

	/* use_dma changes are ignored till next device re-init */
	init_dma = use_dma;

	/* basic endpoint init */
	for (i = 0; i < dev->n_ep; i++) {
		struct net2280_ep *ep = &dev->ep[i];

		ep->ep.name = ep_name[i];
		ep->dev = dev;
		ep->num = i;

		if (i > 0 && i <= 4 && init_dma)
			ep->dma = &dev->dma[i - 1];

		if (dev->enhanced_mode) {
			ep->cfg = &dev->epregs[ne[i]];
			ep->regs = (struct net2280_ep_regs __iomem *)
				(((void *)&dev->epregs[ne[i]]) +
				ep_reg_addr[i]);
			ep->fiforegs = &dev->fiforegs[i];
		} else {
			ep->cfg = &dev->epregs[i];
			ep->regs = &dev->epregs[i];
			ep->fiforegs = &dev->fiforegs[i];
		}

		/* ep0 gets 512 bytes, data endpoints 2KB */
		ep->fifo_size = (i != 0) ? 2048 : 512;

		ep_reset_338x(dev->regs, ep);
	}
	usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 512);

	dev->gadget.ep0 = &dev->ep[0].ep;
	dev->ep[0].stopped = 0;

	/* Link layer set up */
	fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
				(0xf << DEFECT7374_FSM_FIELD);

	/* See if driver needs to set up for workaround: */
	if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ)
		INFO(dev, "%s: Defect 7374 FsmValue %08x\n",
						__func__, fsmvalue);
	else {
		/* workaround done: clear U1/U2/LTM acceptance bits */
		tmp = readl(&dev->usb_ext->usbctl2) &
			~(BIT(U1_ENABLE) | BIT(U2_ENABLE) | BIT(LTM_ENABLE));
		writel(tmp, &dev->usb_ext->usbctl2);
	}

	/* Hardware Defect and Workaround */
	val = readl(&dev->ll_lfps_regs->ll_lfps_5);
	val &= ~(0xf << TIMER_LFPS_6US);
	val |= 0x5 << TIMER_LFPS_6US;
	writel(val, &dev->ll_lfps_regs->ll_lfps_5);

	val = readl(&dev->ll_lfps_regs->ll_lfps_6);
	val &= ~(0xffff << TIMER_LFPS_80US);
	val |= 0x0100 << TIMER_LFPS_80US;
	writel(val, &dev->ll_lfps_regs->ll_lfps_6);

	/*
	 * AA_AB Errata. Issue 4. Workaround for SuperSpeed USB
	 * Hot Reset Exit Handshake may Fail in Specific Case using
	 * Default Register Settings. Workaround for Enumeration test.
	 */
	val = readl(&dev->ll_tsn_regs->ll_tsn_counters_2);
	val &= ~(0x1f << HOT_TX_NORESET_TS2);
	val |= 0x10 << HOT_TX_NORESET_TS2;
	writel(val, &dev->ll_tsn_regs->ll_tsn_counters_2);

	val = readl(&dev->ll_tsn_regs->ll_tsn_counters_3);
	val &= ~(0x1f << HOT_RX_RESET_TS2);
	val |= 0x3 << HOT_RX_RESET_TS2;
	writel(val, &dev->ll_tsn_regs->ll_tsn_counters_3);

	/*
	 * Set Recovery Idle to Recover bit:
	 * - On SS connections, setting Recovery Idle to Recover Fmw improves
	 *   link robustness with various hosts and hubs.
	 * - It is safe to set for all connection speeds; all chip revisions.
	 * - R-M-W to leave other bits undisturbed.
	 * - Reference PLX TT-7372
	 */
	val = readl(&dev->ll_chicken_reg->ll_tsn_chicken_bit);
	val |= BIT(RECOVERY_IDLE_TO_RECOVER_FMW);
	writel(val, &dev->ll_chicken_reg->ll_tsn_chicken_bit);

	INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);

	/* disable dedicated endpoints */
	writel(0x0D, &dev->dep[0].dep_cfg);
	writel(0x0D, &dev->dep[1].dep_cfg);
	writel(0x0E, &dev->dep[2].dep_cfg);
	writel(0x0E, &dev->dep[3].dep_cfg);
	writel(0x0F, &dev->dep[4].dep_cfg);
	writel(0x0C, &dev->dep[5].dep_cfg);
}
2253
2254static void usb_reinit(struct net2280 *dev)
2255{
c2db8a8a 2256 if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
adc82f77
RRD
2257 return usb_reinit_228x(dev);
2258 return usb_reinit_338x(dev);
2259}
2260
/*
 * Arm endpoint zero on a 228x part: clear stale ep0 response state,
 * enable hardware auto-handling of selected standard requests, turn on
 * host detection (softconnect) and the interrupt sources the driver
 * services in its irq handler.
 */
static void ep0_start_228x(struct net2280 *dev)
{
	writel(BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
		BIT(CLEAR_NAK_OUT_PACKETS) |
		BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE)
		, &dev->epregs [0].ep_rsp);

	/*
	 * hardware optionally handles a bunch of standard requests
	 * that the API hides from drivers anyway.  have it do so.
	 * endpoint status/features are handled in software, to
	 * help pass tests for some dubious behavior.
	 */
	writel(BIT(SET_TEST_MODE) |
		BIT(SET_ADDRESS) |
		BIT(DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP) |
		BIT(GET_DEVICE_STATUS) |
		BIT(GET_INTERFACE_STATUS)
		, &dev->usb->stdrsp);
	writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
		BIT(SELF_POWERED_USB_DEVICE) |
		BIT(REMOTE_WAKEUP_SUPPORT) |
		(dev->softconnect << USB_DETECT_ENABLE) |
		BIT(SELF_POWERED_STATUS),
		&dev->usb->usbctl);

	/* enable irqs so we can see ep0 and general operation */
	writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
		BIT(ENDPOINT_0_INTERRUPT_ENABLE),
		&dev->regs->pciirqenb0);
	writel(BIT(PCI_INTERRUPT_ENABLE) |
		BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE) |
		BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE) |
		BIT(PCI_RETRY_ABORT_INTERRUPT_ENABLE) |
		BIT(VBUS_INTERRUPT_ENABLE) |
		BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
		BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE),
		&dev->regs->pciirqenb1);

	/* don't leave any writes posted */
	(void) readl (&dev->usb->usbctl);
}
2303
adc82f77
RRD
/*
 * Arm endpoint zero on a 338x part.  The ep0 response-mode write is
 * gated on the Defect 7374 workaround FSM: it is only issued once the
 * workaround has completed (FSM == SS_CONTROL_READ).
 */
static void ep0_start_338x(struct net2280 *dev)
{
	u32 fsmvalue;

	fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
			(0xf << DEFECT7374_FSM_FIELD);

	if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ)
		INFO(dev, "%s: Defect 7374 FsmValue %08x\n", __func__,
		     fsmvalue);
	else
		writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
		       BIT(SET_EP_HIDE_STATUS_PHASE),
		       &dev->epregs[0].ep_rsp);

	/*
	 * hardware optionally handles a bunch of standard requests
	 * that the API hides from drivers anyway.  have it do so.
	 * endpoint status/features are handled in software, to
	 * help pass tests for some dubious behavior.
	 */
	writel(BIT(SET_ISOCHRONOUS_DELAY) |
	       BIT(SET_SEL) |
	       BIT(SET_TEST_MODE) |
	       BIT(SET_ADDRESS) |
	       BIT(GET_INTERFACE_STATUS) |
	       BIT(GET_DEVICE_STATUS),
		&dev->usb->stdrsp);
	/* remote wakeup is advertised as enabled from the start */
	dev->wakeup_enable = 1;
	writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
	       (dev->softconnect << USB_DETECT_ENABLE) |
	       BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
	       &dev->usb->usbctl);

	/* enable irqs so we can see ep0 and general operation */
	writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
	       BIT(ENDPOINT_0_INTERRUPT_ENABLE)
	       , &dev->regs->pciirqenb0);
	writel(BIT(PCI_INTERRUPT_ENABLE) |
	       BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
	       BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE) |
	       BIT(VBUS_INTERRUPT_ENABLE),
	       &dev->regs->pciirqenb1);

	/* don't leave any writes posted */
	(void)readl(&dev->usb->usbctl);
}
2351
2352static void ep0_start(struct net2280 *dev)
2353{
c2db8a8a 2354 if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
adc82f77
RRD
2355 return ep0_start_228x(dev);
2356 return ep0_start_338x(dev);
2357}
2358
1da177e4
LT
2359/* when a driver is successfully registered, it will receive
2360 * control requests including set_configuration(), which enables
2361 * non-control requests. then usb traffic follows until a
2362 * disconnect is reported. then a host may connect again, or
2363 * the driver might get unbound.
2364 */
4cf5e00b
FB
/*
 * usb_gadget_ops.udc_start: bind a gadget (function) driver to this
 * controller.  Creates the "function"/"queues" sysfs attributes, then
 * enables host detection and ep0 so enumeration can begin.
 * Returns 0 on success or a negative errno (driver left unbound).
 */
static int net2280_start(struct usb_gadget *_gadget,
		struct usb_gadget_driver *driver)
{
	struct net2280		*dev;
	int			retval;
	unsigned		i;

	/* insist on high speed support from the driver, since
	 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
	 * "must not be used in normal operation"
	 */
	if (!driver || driver->max_speed < USB_SPEED_HIGH
			|| !driver->setup)
		return -EINVAL;

	dev = container_of (_gadget, struct net2280, gadget);

	/* reset per-endpoint irq accounting */
	for (i = 0; i < dev->n_ep; i++)
		dev->ep [i].irqs = 0;

	/* hook up the driver ... */
	dev->softconnect = 1;
	driver->driver.bus = NULL;
	dev->driver = driver;

	retval = device_create_file (&dev->pdev->dev, &dev_attr_function);
	if (retval) goto err_unbind;
	retval = device_create_file (&dev->pdev->dev, &dev_attr_queues);
	if (retval) goto err_func;

	/* Enable force-full-speed testing mode, if desired */
	if (full_speed && dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
		writel(BIT(FORCE_FULL_SPEED_MODE), &dev->usb->xcvrdiag);

	/* ... then enable host detection and ep0; and we're ready
	 * for set_configuration as well as eventual disconnect.
	 */
	net2280_led_active (dev, 1);

	/* 338x parts need the Defect 7374 workaround armed first */
	if (dev->pdev->vendor == PCI_VENDOR_ID_PLX)
		defect7374_enable_data_eps_zero(dev);

	ep0_start (dev);

	DEBUG (dev, "%s ready, usbctl %08x stdrsp %08x\n",
			driver->driver.name,
			readl (&dev->usb->usbctl),
			readl (&dev->usb->stdrsp));

	/* pci writes may still be posted */
	return 0;

err_func:
	device_remove_file (&dev->pdev->dev, &dev_attr_function);
err_unbind:
	dev->driver = NULL;
	return retval;
}
1da177e4
LT
2423
/*
 * Quiesce the controller and flush all endpoint queues, then notify the
 * bound gadget driver of disconnect (if one was connected) and rebuild
 * the software endpoint state.  Called with dev->lock held; the lock is
 * dropped around the driver's disconnect() callback.
 */
static void
stop_activity (struct net2280 *dev, struct usb_gadget_driver *driver)
{
	int			i;

	/* don't disconnect if it's not connected */
	if (dev->gadget.speed == USB_SPEED_UNKNOWN)
		driver = NULL;

	/* stop hardware; prevent new request submissions;
	 * and kill any outstanding requests.
	 */
	usb_reset (dev);
	for (i = 0; i < dev->n_ep; i++)
		nuke (&dev->ep [i]);

	/* report disconnect; the driver is already quiesced */
	if (driver) {
		spin_unlock(&dev->lock);
		driver->disconnect(&dev->gadget);
		spin_lock(&dev->lock);
	}

	usb_reinit (dev);
}
2449
4cf5e00b
FB
/*
 * usb_gadget_ops.udc_stop: unbind the gadget driver.  Stops all
 * activity under the device lock, then tears down the sysfs attributes
 * created by net2280_start().  Always returns 0.
 */
static int net2280_stop(struct usb_gadget *_gadget,
		struct usb_gadget_driver *driver)
{
	struct net2280	*dev;
	unsigned long	flags;

	dev = container_of (_gadget, struct net2280, gadget);

	spin_lock_irqsave (&dev->lock, flags);
	stop_activity (dev, driver);
	spin_unlock_irqrestore (&dev->lock, flags);

	dev->driver = NULL;

	net2280_led_active (dev, 0);

	/* Disable full-speed test mode */
	if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
		writel(0, &dev->usb->xcvrdiag);

	device_remove_file (&dev->pdev->dev, &dev_attr_function);
	device_remove_file (&dev->pdev->dev, &dev_attr_queues);

	DEBUG(dev, "unregistered driver '%s'\n",
			driver ? driver->driver.name : "");

	return 0;
}
1da177e4
LT
2478
2479/*-------------------------------------------------------------------------*/
2480
2481/* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
2482 * also works for dma-capable endpoints, in pio mode or just
2483 * to manually advance the queue after short OUT transfers.
2484 */
/* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
 * also works for dma-capable endpoints, in pio mode or just
 * to manually advance the queue after short OUT transfers.
 *
 * Called from irq context with dev->lock held.  "mode" tracks the
 * request's progress: 0 = error, 1 = mid-data, 2 = done.
 */
static void handle_ep_small (struct net2280_ep *ep)
{
	struct net2280_request	*req;
	u32			t;
	/* 0 error, 1 mid-data, 2 done */
	int			mode = 1;

	/* current (head) request, if any */
	if (!list_empty (&ep->queue))
		req = list_entry (ep->queue.next,
			struct net2280_request, queue);
	else
		req = NULL;

	/* ack all, and handle what we care about */
	t = readl (&ep->regs->ep_stat);
	ep->irqs++;
#if 0
	VDEBUG (ep->dev, "%s ack ep_stat %08x, req %p\n",
			ep->ep.name, t, req ? &req->req : 0);
#endif
	/* on 2280 (and all OUT eps) the NAK_OUT_PACKETS latch must be
	 * preserved; the 2282 can ack everything at once
	 */
	if (!ep->is_in || ep->dev->pdev->device == 0x2280)
		writel(t & ~BIT(NAK_OUT_PACKETS), &ep->regs->ep_stat);
	else
		/* Added for 2282 */
		writel (t, &ep->regs->ep_stat);

	/* for ep0, monitor token irqs to catch data stage length errors
	 * and to synchronize on status.
	 *
	 * also, to defer reporting of protocol stalls ... here's where
	 * data or status first appears, handling stalls here should never
	 * cause trouble on the host side..
	 *
	 * control requests could be slightly faster without token synch for
	 * status, but status can jam up that way.
	 */
	if (unlikely (ep->num == 0)) {
		if (ep->is_in) {
			/* status; stop NAKing */
			if (t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt (ep);
				}
				if (!req)
					allow_status (ep);
				mode = 2;
			/* reply to extra IN data tokens with a zlp */
			} else if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt (ep);
					mode = 2;
				} else if (ep->responded &&
						!req && !ep->stopped)
					write_fifo (ep, NULL);
			}
		} else {
			/* status; stop NAKing */
			if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
				if (ep->dev->protocol_stall) {
					ep->stopped = 1;
					set_halt (ep);
				}
				mode = 2;
			/* an extra OUT token is an error */
			} else if (((t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT))
					&& req
					&& req->req.actual == req->req.length)
					|| (ep->responded && !req)) {
				ep->dev->protocol_stall = 1;
				set_halt (ep);
				ep->stopped = 1;
				if (req)
					done (ep, req, -EOVERFLOW);
				req = NULL;
			}
		}
	}

	if (unlikely (!req))
		return;

	/* manual DMA queue advance after short OUT */
	if (likely (ep->dma)) {
		if (t & BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
			u32	count;
			int	stopped = ep->stopped;

			/* TRANSFERRED works around OUT_DONE erratum 0112.
			 * we expect (N <= maxpacket) bytes; host wrote M.
			 * iff (M < N) we won't ever see a DMA interrupt.
			 */
			ep->stopped = 1;
			for (count = 0; ; t = readl (&ep->regs->ep_stat)) {

				/* any preceding dma transfers must finish.
				 * dma handles (M >= N), may empty the queue
				 */
				scan_dma_completions (ep);
				if (unlikely (list_empty (&ep->queue)
						|| ep->out_overflow)) {
					req = NULL;
					break;
				}
				req = list_entry (ep->queue.next,
					struct net2280_request, queue);

				/* here either (M < N), a "real" short rx;
				 * or (M == N) and the queue didn't empty
				 */
				if (likely(t & BIT(FIFO_EMPTY))) {
					count = readl (&ep->dma->dmacount);
					count &= DMA_BYTE_COUNT_MASK;
					if (readl (&ep->dma->dmadesc)
							!= req->td_dma)
						req = NULL;
					break;
				}
				udelay(1);
			}

			/* stop DMA, leave ep NAKing */
			writel(BIT(DMA_ABORT), &ep->dma->dmastat);
			spin_stop_dma (ep->dma);

			if (likely (req)) {
				req->td->dmacount = 0;
				t = readl (&ep->regs->ep_avail);
				dma_done (ep, req, count,
					(ep->out_overflow || t)
						? -EOVERFLOW : 0);
			}

			/* also flush to prevent erratum 0106 trouble */
			if (unlikely (ep->out_overflow
					|| (ep->dev->chiprev == 0x0100
						&& ep->dev->gadget.speed
							== USB_SPEED_FULL))) {
				out_flush (ep);
				ep->out_overflow = 0;
			}

			/* (re)start dma if needed, stop NAKing */
			ep->stopped = stopped;
			if (!list_empty (&ep->queue))
				restart_dma (ep);
		} else
			DEBUG (ep->dev, "%s dma ep_stat %08x ??\n",
					ep->ep.name, t);
		return;

	/* data packet(s) received (in the fifo, OUT) */
	} else if (t & BIT(DATA_PACKET_RECEIVED_INTERRUPT)) {
		if (read_fifo (ep, req) && ep->num != 0)
			mode = 2;

	/* data packet(s) transmitted (IN) */
	} else if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT)) {
		unsigned	len;

		len = req->req.length - req->req.actual;
		if (len > ep->ep.maxpacket)
			len = ep->ep.maxpacket;
		req->req.actual += len;

		/* if we wrote it all, we're usually done */
		if (req->req.actual == req->req.length) {
			if (ep->num == 0) {
				/* send zlps until the status stage */
			} else if (!req->req.zero || len != ep->ep.maxpacket)
				mode = 2;
		}

	/* there was nothing to do ...  */
	} else if (mode == 1)
		return;

	/* done */
	if (mode == 2) {
		/* stream endpoints often resubmit/unlink in completion */
		done (ep, req, 0);

		/* maybe advance queue to next request */
		if (ep->num == 0) {
			/* NOTE:  net2280 could let gadget driver start the
			 * status stage later. since not all controllers let
			 * them control that, the api doesn't (yet) allow it.
			 */
			if (!ep->stopped)
				allow_status (ep);
			req = NULL;
		} else {
			if (!list_empty (&ep->queue) && !ep->stopped)
				req = list_entry (ep->queue.next,
					struct net2280_request, queue);
			else
				req = NULL;
			if (req && !ep->is_in)
				stop_out_naking (ep);
		}
	}

	/* is there a buffer for the next packet?
	 * for best streaming performance, make sure there is one.
	 */
	if (req && !ep->stopped) {

		/* load IN fifo with next packet (may be zlp) */
		if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT))
			write_fifo (ep, &req->req);
	}
}
2698
2699static struct net2280_ep *
2700get_ep_by_addr (struct net2280 *dev, u16 wIndex)
2701{
2702 struct net2280_ep *ep;
2703
2704 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
2705 return &dev->ep [0];
2706 list_for_each_entry (ep, &dev->gadget.ep_list, ep.ep_list) {
2707 u8 bEndpointAddress;
2708
2709 if (!ep->desc)
2710 continue;
2711 bEndpointAddress = ep->desc->bEndpointAddress;
2712 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
2713 continue;
2714 if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
2715 return ep;
2716 }
2717 return NULL;
2718}
2719
adc82f77
RRD
/*
 * Defect 7374 workaround (U1/U2 erroneously rejected): run once on the
 * first Control Read of a connection.  On a SuperSpeed link, poll until
 * the host's data-phase ACK is seen, then record the outcome in the
 * SCRATCH FSM field and restore the data endpoints that
 * defect7374_enable_data_eps_zero() had repurposed.
 */
static void defect7374_workaround(struct net2280 *dev, struct usb_ctrlrequest r)
{
	u32 scratch, fsmvalue;
	u32 ack_wait_timeout, state;

	/* Workaround for Defect 7374 (U1/U2 erroneously rejected): */
	scratch = get_idx_reg(dev->regs, SCRATCH);
	fsmvalue = scratch & (0xf << DEFECT7374_FSM_FIELD);
	scratch &= ~(0xf << DEFECT7374_FSM_FIELD);

	/* only act while waiting for the first IN control read */
	if (!((fsmvalue == DEFECT7374_FSM_WAITING_FOR_CONTROL_READ) &&
	      (r.bRequestType & USB_DIR_IN)))
		return;

	/* This is the first Control Read for this connection: */
	if (!(readl(&dev->usb->usbstat) & BIT(SUPER_SPEED_MODE))) {
		/*
		 * Connection is NOT SS:
		 * - Connection must be FS or HS.
		 * - This FSM state should allow workaround software to
		 * run after the next USB connection.
		 */
		scratch |= DEFECT7374_FSM_NON_SS_CONTROL_READ;
		goto restore_data_eps;
	}

	/* Connection is SS: */
	for (ack_wait_timeout = 0;
	     ack_wait_timeout < DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS;
	     ack_wait_timeout++) {

		state =	readl(&dev->plregs->pl_ep_status_1)
			& (0xff << STATE);
		if ((state >= (ACK_GOOD_NORMAL << STATE)) &&
		    (state <= (ACK_GOOD_MORE_ACKS_TO_COME << STATE))) {
			scratch |= DEFECT7374_FSM_SS_CONTROL_READ;
			break;
		}

		/*
		 * We have not yet received host's Data Phase ACK
		 * - Wait and try again.
		 */
		udelay(DEFECT_7374_PROCESSOR_WAIT_TIME);

		continue;
	}


	if (ack_wait_timeout >= DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS) {
		ERROR(dev, "FAIL: Defect 7374 workaround waited but failed "
		"to detect SS host's data phase ACK.");
		ERROR(dev, "PL_EP_STATUS_1(23:16):.Expected from 0x11 to 0x16"
		"got 0x%2.2x.\n", state >> STATE);
	} else {
		WARNING(dev, "INFO: Defect 7374 workaround waited about\n"
		"%duSec for Control Read Data Phase ACK\n",
			DEFECT_7374_PROCESSOR_WAIT_TIME * ack_wait_timeout);
	}

restore_data_eps:
	/*
	 * Restore data EPs to their pre-workaround settings (disabled,
	 * initialized, and other details).
	 */
	defect7374_disable_data_eps(dev);

	set_idx_reg(dev->regs, SCRATCH, scratch);

	return;
}
2791
/*
 * Set or clear the hardware halt (stall) condition on an endpoint,
 * mirroring it in ep->is_halt.  Clearing on a SuperSpeed link also
 * resets the endpoint's sequence number (the halt-clear bit alone does
 * not do that on this silicon), via the ep_pl[] endpoint-select codes.
 */
static void ep_stall(struct net2280_ep *ep, int stall)
{
	struct net2280 *dev = ep->dev;
	u32 val;
	/* protocol-layer endpoint select codes, indexed by ep->num */
	static const u32 ep_pl[9] = { 0, 3, 4, 7, 8, 2, 5, 6, 9 };

	if (stall) {
		writel(BIT(SET_ENDPOINT_HALT) |
		       /* BIT(SET_NAK_PACKETS) | */
		       BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE),
		       &ep->regs->ep_rsp);
		ep->is_halt = 1;
	} else {
		if (dev->gadget.speed == USB_SPEED_SUPER) {
			/*
			 * Workaround for SS SeqNum not cleared via
			 * Endpoint Halt (Clear) bit. select endpoint
			 */
			val = readl(&dev->plregs->pl_ep_ctrl);
			val = (val & ~0x1f) | ep_pl[ep->num];
			writel(val, &dev->plregs->pl_ep_ctrl);

			val |= BIT(SEQUENCE_NUMBER_RESET);
			writel(val, &dev->plregs->pl_ep_ctrl);
		}
		val = readl(&ep->regs->ep_rsp);
		val |= BIT(CLEAR_ENDPOINT_HALT) |
			BIT(CLEAR_ENDPOINT_TOGGLE);
		writel(val
		       /* | BIT(CLEAR_NAK_PACKETS)*/
		       , &ep->regs->ep_rsp);
		ep->is_halt = 0;
		/* read back to flush the posted write */
		val = readl(&ep->regs->ep_rsp);
	}
}
2827
/*
 * Implement set/clear-halt semantics for standard requests on 338x
 * parts.  "value" selects halt vs resume, "wedged" marks the endpoint
 * so only a real ClearFeature(HALT) can un-wedge it.  On resume, any
 * queued request is kicked off again (DMA resume or PIO fifo access).
 */
static void ep_stdrsp(struct net2280_ep *ep, int value, int wedged)
{
	/* set/clear, then synch memory views with the device */
	if (value) {
		ep->stopped = 1;
		/* ep0 stalls are reported via the protocol-stall path */
		if (ep->num == 0)
			ep->dev->protocol_stall = 1;
		else {
			if (ep->dma)
				ep_stop_dma(ep);
			ep_stall(ep, true);
		}

		if (wedged)
			ep->wedged = 1;
	} else {
		ep->stopped = 0;
		ep->wedged = 0;

		ep_stall(ep, false);

		/* Flush the queue */
		if (!list_empty(&ep->queue)) {
			struct net2280_request *req =
			    list_entry(ep->queue.next, struct net2280_request,
				       queue);
			if (ep->dma)
				resume_dma(ep);
			else {
				if (ep->is_in)
					write_fifo(ep, &req->req);
				else {
					if (read_fifo(ep, req))
						done(ep, req, 0);
				}
			}
		}
	}
}
2867
/*
 * Handle ep0 SETUP packets on a SuperSpeed (338x) connection.  The
 * requests the hardware does not auto-answer are handled here:
 * GET_STATUS, SET/CLEAR_FEATURE for U1/U2/LTM, remote wakeup and
 * endpoint halt; everything else is delegated to the gadget driver's
 * setup() callback (with dev->lock dropped around it).  A negative
 * return from the driver falls through to do_stall3 and stalls ep0.
 */
static void handle_stat0_irqs_superspeed(struct net2280 *dev,
		struct net2280_ep *ep, struct usb_ctrlrequest r)
{
	int tmp = 0;

#define	w_value		le16_to_cpu(r.wValue)
#define	w_index		le16_to_cpu(r.wIndex)
#define	w_length	le16_to_cpu(r.wLength)

	switch (r.bRequest) {
		struct net2280_ep *e;
		u16 status;

	case USB_REQ_SET_CONFIGURATION:
		/* configured (w_value != 0) vs addressed state */
		dev->addressed_state = !w_value;
		goto usb3_delegate;

	case USB_REQ_GET_STATUS:
		switch (r.bRequestType) {
		case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
			/* bit0 self-powered, bit1 wakeup, bits2-4 U1/U2/LTM */
			status = dev->wakeup_enable ? 0x02 : 0x00;
			if (dev->selfpowered)
				status |= BIT(0);
			status |= (dev->u1_enable << 2 | dev->u2_enable << 3 |
							dev->ltm_enable << 4);
			writel(0, &dev->epregs[0].ep_irqenb);
			set_fifo_bytecount(ep, sizeof(status));
			writel((__force u32) status, &dev->epregs[0].ep_data);
			allow_status_338x(ep);
			break;

		case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
			e = get_ep_by_addr(dev, w_index);
			if (!e)
				goto do_stall3;
			status = readl(&e->regs->ep_rsp) &
						BIT(CLEAR_ENDPOINT_HALT);
			writel(0, &dev->epregs[0].ep_irqenb);
			set_fifo_bytecount(ep, sizeof(status));
			writel((__force u32) status, &dev->epregs[0].ep_data);
			allow_status_338x(ep);
			break;

		default:
			goto usb3_delegate;
		}
		break;

	case USB_REQ_CLEAR_FEATURE:
		switch (r.bRequestType) {
		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
			/* U1/U2/LTM may only change in configured state */
			if (!dev->addressed_state) {
				switch (w_value) {
				case USB_DEVICE_U1_ENABLE:
					dev->u1_enable = 0;
					writel(readl(&dev->usb_ext->usbctl2) &
						~BIT(U1_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				case USB_DEVICE_U2_ENABLE:
					dev->u2_enable = 0;
					writel(readl(&dev->usb_ext->usbctl2) &
						~BIT(U2_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				case USB_DEVICE_LTM_ENABLE:
					dev->ltm_enable = 0;
					writel(readl(&dev->usb_ext->usbctl2) &
						~BIT(LTM_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				default:
					break;
				}
			}
			if (w_value == USB_DEVICE_REMOTE_WAKEUP) {
				dev->wakeup_enable = 0;
				writel(readl(&dev->usb->usbctl) &
					~BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
					&dev->usb->usbctl);
				allow_status_338x(ep);
				break;
			}
			goto usb3_delegate;

		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
			e = get_ep_by_addr(dev,	w_index);
			if (!e)
				goto do_stall3;
			if (w_value != USB_ENDPOINT_HALT)
				goto do_stall3;
			VDEBUG(dev, "%s clear halt\n", e->ep.name);
			ep_stall(e, false);
			if (!list_empty(&e->queue) && e->td_dma)
				restart_dma(e);
			allow_status(ep);
			ep->stopped = 1;
			break;

		default:
			goto usb3_delegate;
		}
		break;
	case USB_REQ_SET_FEATURE:
		switch (r.bRequestType) {
		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
			if (!dev->addressed_state) {
				switch (w_value) {
				case USB_DEVICE_U1_ENABLE:
					dev->u1_enable = 1;
					writel(readl(&dev->usb_ext->usbctl2) |
						BIT(U1_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				case USB_DEVICE_U2_ENABLE:
					dev->u2_enable = 1;
					writel(readl(&dev->usb_ext->usbctl2) |
						BIT(U2_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;

				case USB_DEVICE_LTM_ENABLE:
					dev->ltm_enable = 1;
					writel(readl(&dev->usb_ext->usbctl2) |
						BIT(LTM_ENABLE),
						&dev->usb_ext->usbctl2);
					allow_status_338x(ep);
					goto next_endpoints3;
				default:
					break;
				}
			}

			if (w_value == USB_DEVICE_REMOTE_WAKEUP) {
				dev->wakeup_enable = 1;
				writel(readl(&dev->usb->usbctl) |
					BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
					&dev->usb->usbctl);
				allow_status_338x(ep);
				break;
			}
			goto usb3_delegate;

		case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
			e = get_ep_by_addr(dev,	w_index);
			if (!e || (w_value != USB_ENDPOINT_HALT))
				goto do_stall3;
			ep_stdrsp(e, true, false);
			allow_status_338x(ep);
			break;

		default:
			goto usb3_delegate;
		}

		break;
	default:

usb3_delegate:
		VDEBUG(dev, "setup %02x.%02x v%04x i%04x l%04x ep_cfg %08x\n",
				r.bRequestType, r.bRequest,
				w_value, w_index, w_length,
				readl(&ep->cfg->ep_cfg));

		/* hand anything we don't handle to the gadget driver */
		ep->responded = 0;
		spin_unlock(&dev->lock);
		tmp = dev->driver->setup(&dev->gadget, &r);
		spin_lock(&dev->lock);
	}
do_stall3:
	/* reached by fallthrough (tmp from setup()) or by goto (tmp == 0) */
	if (tmp < 0) {
		VDEBUG(dev, "req %02x.%02x protocol STALL; stat %d\n",
				r.bRequestType, r.bRequest, tmp);
		dev->protocol_stall = 1;
		/* TD 9.9 Halt Endpoint test. TD 9.22 Set feature test */
		ep_stall(ep, true);
	}

next_endpoints3:

#undef	w_value
#undef	w_index
#undef	w_length

	return;
}
3063
1da177e4
LT
/*
 * handle_stat0_irqs - service IRQSTAT0 sources: ep0 SETUP packets and the
 * per-endpoint PIO/FIFO interrupts in bits 0..6.  Called from net2280_irq()
 * with dev->lock held.  Standard requests the hardware can't fully handle
 * (GET_STATUS/CLEAR_FEATURE/SET_FEATURE on endpoints) are serviced here;
 * everything else is delegated to the bound gadget driver's setup() callback
 * (with the lock dropped around the call).
 */
static void handle_stat0_irqs (struct net2280 *dev, u32 stat)
{
	struct net2280_ep	*ep;
	u32			num, scratch;

	/* most of these don't need individual acks */
	stat &= ~BIT(INTA_ASSERTED);
	if (!stat)
		return;
	// DEBUG (dev, "irqstat0 %04x\n", stat);

	/* starting a control request? */
	if (unlikely(stat & BIT(SETUP_PACKET_INTERRUPT))) {
		union {
			u32			raw [2];
			struct usb_ctrlrequest	r;
		} u;
		int				tmp;
		struct net2280_request		*req;

		/* first SETUP after reset/connect: latch the negotiated
		 * bus speed from usbstat and size ep0 accordingly.
		 * NOTE: full speed also uses the 64-byte (HS) ep0 limit.
		 */
		if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
			u32 val = readl(&dev->usb->usbstat);
			if (val & BIT(SUPER_SPEED)) {
				dev->gadget.speed = USB_SPEED_SUPER;
				usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
						EP0_SS_MAX_PACKET_SIZE);
			} else if (val & BIT(HIGH_SPEED)) {
				dev->gadget.speed = USB_SPEED_HIGH;
				usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
						EP0_HS_MAX_PACKET_SIZE);
			} else {
				dev->gadget.speed = USB_SPEED_FULL;
				usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
						EP0_HS_MAX_PACKET_SIZE);
			}
			net2280_led_speed (dev, dev->gadget.speed);
			DEBUG(dev, "%s\n", usb_speed_string(dev->gadget.speed));
		}

		ep = &dev->ep [0];
		ep->irqs++;

		/* make sure any leftover request state is cleared;
		 * a new SETUP aborts whatever control transfer was
		 * in flight (-EPROTO if it was incomplete).
		 */
		stat &= ~BIT(ENDPOINT_0_INTERRUPT);
		while (!list_empty (&ep->queue)) {
			req = list_entry (ep->queue.next,
					struct net2280_request, queue);
			done (ep, req, (req->req.actual == req->req.length)
					? 0 : -EPROTO);
		}
		ep->stopped = 0;
		dev->protocol_stall = 0;
		/* 338x (PCI_VENDOR_ID_PLX) tracks halt in software only;
		 * legacy parts clear all stale ep0 status bits by hand
		 * (2280 rev also has FIFO over/underflow bits to ack).
		 */
		if (dev->pdev->vendor == PCI_VENDOR_ID_PLX)
			ep->is_halt = 0;
		else{
			if (ep->dev->pdev->device == 0x2280)
				tmp = BIT(FIFO_OVERFLOW) |
				    BIT(FIFO_UNDERFLOW);
			else
				tmp = 0;

			writel(tmp | BIT(TIMEOUT) |
				   BIT(USB_STALL_SENT) |
				   BIT(USB_IN_NAK_SENT) |
				   BIT(USB_IN_ACK_RCVD) |
				   BIT(USB_OUT_PING_NAK_SENT) |
				   BIT(USB_OUT_ACK_SENT) |
				   BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
				   BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
				   BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
				   BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
				   BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
				   BIT(DATA_IN_TOKEN_INTERRUPT)
				   , &ep->regs->ep_stat);
		}
		/* read the 8 SETUP bytes out of the two latch registers */
		u.raw[0] = readl(&dev->usb->setup0123);
		u.raw[1] = readl(&dev->usb->setup4567);

		cpu_to_le32s (&u.raw [0]);
		cpu_to_le32s (&u.raw [1]);

		if (dev->pdev->vendor == PCI_VENDOR_ID_PLX)
			defect7374_workaround(dev, u.r);

		/* tmp doubles as the setup() return value for the
		 * do_stall path below; start out "no error".
		 */
		tmp = 0;

#define	w_value		le16_to_cpu(u.r.wValue)
#define	w_index		le16_to_cpu(u.r.wIndex)
#define	w_length	le16_to_cpu(u.r.wLength)

		/* ack the irq */
		writel(BIT(SETUP_PACKET_INTERRUPT), &dev->regs->irqstat0);
		stat ^= BIT(SETUP_PACKET_INTERRUPT);

		/* watch control traffic at the token level, and force
		 * synchronization before letting the status stage happen.
		 * FIXME ignore tokens we'll NAK, until driver responds.
		 * that'll mean a lot less irqs for some drivers.
		 */
		ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
		if (ep->is_in) {
			scratch = BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
				BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
				BIT(DATA_IN_TOKEN_INTERRUPT);
			stop_out_naking (ep);
		} else
			scratch = BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
				BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
				BIT(DATA_IN_TOKEN_INTERRUPT);
		writel (scratch, &dev->epregs [0].ep_irqenb);

		/* we made the hardware handle most lowlevel requests;
		 * everything else goes uplevel to the gadget code.
		 */
		ep->responded = 1;

		/* SuperSpeed requests have their own decoder; it shares
		 * the next_endpoints label below for the common tail.
		 */
		if (dev->gadget.speed == USB_SPEED_SUPER) {
			handle_stat0_irqs_superspeed(dev, ep, u.r);
			goto next_endpoints;
		}

		switch (u.r.bRequest) {
		case USB_REQ_GET_STATUS: {
			struct net2280_ep	*e;
			__le32			status;

			/* hw handles device and interface status */
			if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
				goto delegate;
			if ((e = get_ep_by_addr (dev, w_index)) == NULL
					|| w_length > 2)
				goto do_stall;

			if (readl(&e->regs->ep_rsp) & BIT(SET_ENDPOINT_HALT))
				status = cpu_to_le32 (1);
			else
				status = cpu_to_le32 (0);

			/* don't bother with a request object! */
			writel (0, &dev->epregs [0].ep_irqenb);
			set_fifo_bytecount (ep, w_length);
			writel ((__force u32)status, &dev->epregs [0].ep_data);
			allow_status (ep);
			/* NOTE(review): status is __le32 printed with %02x;
			 * harmless on LE but worth confirming the intent.
			 */
			VDEBUG (dev, "%s stat %02x\n", ep->ep.name, status);
			goto next_endpoints;
			}
			break;
		case USB_REQ_CLEAR_FEATURE: {
			struct net2280_ep	*e;

			/* hw handles device features */
			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (w_value != USB_ENDPOINT_HALT
					|| w_length != 0)
				goto do_stall;
			if ((e = get_ep_by_addr (dev, w_index)) == NULL)
				goto do_stall;
			/* a wedged endpoint only unhalts via set_wedge
			 * teardown, not via host CLEAR_FEATURE
			 */
			if (e->wedged) {
				VDEBUG(dev, "%s wedged, halt not cleared\n",
						ep->ep.name);
			} else {
				VDEBUG(dev, "%s clear halt\n", e->ep.name);
				clear_halt(e);
				/* 338x: kick DMA again if requests queued */
				if (ep->dev->pdev->vendor ==
						PCI_VENDOR_ID_PLX &&
					!list_empty(&e->queue) && e->td_dma)
						restart_dma(e);
			}
			allow_status (ep);
			goto next_endpoints;
			}
			break;
		case USB_REQ_SET_FEATURE: {
			struct net2280_ep	*e;

			/* hw handles device features */
			if (u.r.bRequestType != USB_RECIP_ENDPOINT)
				goto delegate;
			if (w_value != USB_ENDPOINT_HALT
					|| w_length != 0)
				goto do_stall;
			if ((e = get_ep_by_addr (dev, w_index)) == NULL)
				goto do_stall;
			/* refuse to halt ep0 via SET_FEATURE */
			if (e->ep.name == ep0name)
				goto do_stall;
			set_halt (e);
			if (dev->pdev->vendor == PCI_VENDOR_ID_PLX && e->dma)
				abort_dma(e);
			allow_status (ep);
			VDEBUG (dev, "%s set halt\n", ep->ep.name);
			goto next_endpoints;
			}
			break;
		default:
delegate:
			VDEBUG (dev, "setup %02x.%02x v%04x i%04x l%04x "
				"ep_cfg %08x\n",
				u.r.bRequestType, u.r.bRequest,
				w_value, w_index, w_length,
				readl(&ep->cfg->ep_cfg));
			/* the gadget driver now owns the reply; drop the
			 * lock around its (possibly sleeping) setup()
			 */
			ep->responded = 0;
			spin_unlock (&dev->lock);
			tmp = dev->driver->setup (&dev->gadget, &u.r);
			spin_lock (&dev->lock);
		}

		/* stall ep0 on error */
		if (tmp < 0) {
do_stall:
			VDEBUG (dev, "req %02x.%02x protocol STALL; stat %d\n",
					u.r.bRequestType, u.r.bRequest, tmp);
			dev->protocol_stall = 1;
		}

		/* some in/out token irq should follow; maybe stall then.
		 * driver must queue a request (even zlp) or halt ep0
		 * before the host times out.
		 */
	}

#undef	w_value
#undef	w_index
#undef	w_length

next_endpoints:
	/* endpoint data irq ? */
	scratch = stat & 0x7f;
	stat &= ~0x7f;
	for (num = 0; scratch; num++) {
		u32		t;

		/* do this endpoint's FIFO and queue need tending? */
		t = BIT(num);
		if ((scratch & t) == 0)
			continue;
		scratch ^= t;

		ep = &dev->ep [num];
		handle_ep_small (ep);
	}

	if (stat)
		DEBUG (dev, "unhandled irqstat0 %08x\n", stat);
}
3309
/* IRQSTAT1 bit groups: the four per-channel DMA interrupts ... */
#define DMA_INTERRUPTS (BIT(DMA_D_INTERRUPT) | \
		BIT(DMA_C_INTERRUPT) | \
		BIT(DMA_B_INTERRUPT) | \
		BIT(DMA_A_INTERRUPT))
/* ... and the fatal PCI bus error interrupts */
#define	PCI_ERROR_INTERRUPTS ( \
		BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT) | \
		BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT) | \
		BIT(PCI_RETRY_ABORT_INTERRUPT))

/*
 * handle_stat1_irqs - service IRQSTAT1 sources: disconnect / root-port
 * reset, suspend/resume, per-channel DMA completion for ep-a..ep-d, and
 * PCI bus errors.  Called from net2280_irq() with dev->lock held.
 */
static void handle_stat1_irqs (struct net2280 *dev, u32 stat)
{
	struct net2280_ep	*ep;
	u32			tmp, num, mask, scratch;

	/* after disconnect there's nothing else to do! */
	tmp = BIT(VBUS_INTERRUPT) | BIT(ROOT_PORT_RESET_INTERRUPT);
	mask = BIT(SUPER_SPEED) | BIT(HIGH_SPEED) | BIT(FULL_SPEED);

	/* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set.
	 * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and
	 * both HIGH_SPEED and FULL_SPEED clear (as ROOT_PORT_RESET_INTERRUPT
	 * only indicates a change in the reset state).
	 */
	if (stat & tmp) {
		writel (tmp, &dev->regs->irqstat1);
		if ((((stat & BIT(ROOT_PORT_RESET_INTERRUPT))
					&& ((readl (&dev->usb->usbstat) & mask)
							== 0))
				|| ((readl (&dev->usb->usbctl)
					& BIT(VBUS_PIN)) == 0)
			    ) && ( dev->gadget.speed != USB_SPEED_UNKNOWN)) {
			DEBUG (dev, "disconnect %s\n",
					dev->driver->driver.name);
			stop_activity (dev, dev->driver);
			ep0_start (dev);
			return;
		}
		stat &= ~tmp;

		/* vBUS can bounce ... one of many reasons to ignore the
		 * notion of hotplug events on bus connect/disconnect!
		 */
		if (!stat)
			return;
	}

	/* NOTE: chip stays in PCI D0 state for now, but it could
	 * enter D1 to save more power
	 */
	tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT);
	if (stat & tmp) {
		writel (tmp, &dev->regs->irqstat1);
		/* the "change" bit fires for both directions; the level
		 * bit tells us whether this is suspend or resume
		 */
		if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) {
			if (dev->driver->suspend)
				dev->driver->suspend (&dev->gadget);
			if (!enable_suspend)
				stat &= ~BIT(SUSPEND_REQUEST_INTERRUPT);
		} else {
			if (dev->driver->resume)
				dev->driver->resume (&dev->gadget);
			/* at high speed, note erratum 0133 */
		}
		stat &= ~tmp;
	}

	/* clear any other status/irqs */
	if (stat)
		writel (stat, &dev->regs->irqstat1);

	/* some status we can just ignore (bit layout differs on 2280) */
	if (dev->pdev->device == 0x2280)
		stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) |
			  BIT(SUSPEND_REQUEST_INTERRUPT) |
			  BIT(RESUME_INTERRUPT) |
			  BIT(SOF_INTERRUPT));
	else
		stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) |
			  BIT(RESUME_INTERRUPT) |
			  BIT(SOF_DOWN_INTERRUPT) |
			  BIT(SOF_INTERRUPT));

	if (!stat)
		return;
	// DEBUG (dev, "irqstat1 %08x\n", stat);

	/* DMA status, for ep-{a,b,c,d}; channel bits start at bit 9 */
	scratch = stat & DMA_INTERRUPTS;
	stat &= ~DMA_INTERRUPTS;
	scratch >>= 9;
	for (num = 0; scratch; num++) {
		struct net2280_dma_regs	__iomem *dma;

		tmp = BIT(num);
		if ((tmp & scratch) == 0)
			continue;
		scratch ^= tmp;

		/* ep[0] is ep0; DMA channels map to ep[1]..ep[4] */
		ep = &dev->ep [num + 1];
		dma = ep->dma;

		if (!dma)
			continue;

		/* clear ep's dma status */
		tmp = readl (&dma->dmastat);
		writel (tmp, &dma->dmastat);

		/* dma sync*/
		if (dev->pdev->vendor == PCI_VENDOR_ID_PLX) {
			/* 338x: skip this OUT channel while residue
			 * remains in dmacount (transfer still draining)
			 */
			u32 r_dmacount = readl(&dma->dmacount);
			if (!ep->is_in && (r_dmacount & 0x00FFFFFF) &&
			    (tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT)))
				continue;
		}

		/* chaining should stop on abort, short OUT from fifo,
		 * or (stat0 codepath) short OUT transfer.
		 */
		if (!use_dma_chaining) {
			if (!(tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT))) {
				DEBUG (ep->dev, "%s no xact done? %08x\n",
					ep->ep.name, tmp);
				continue;
			}
			stop_dma (ep->dma);
		}

		/* OUT transfers terminate when the data from the
		 * host is in our memory. Process whatever's done.
		 * On this path, we know transfer's last packet wasn't
		 * less than req->length. NAK_OUT_PACKETS may be set,
		 * or the FIFO may already be holding new packets.
		 *
		 * IN transfers can linger in the FIFO for a very
		 * long time ... we ignore that for now, accounting
		 * precisely (like PIO does) needs per-packet irqs
		 */
		scan_dma_completions (ep);

		/* disable dma on inactive queues; else maybe restart */
		if (list_empty (&ep->queue)) {
			if (use_dma_chaining)
				stop_dma (ep->dma);
		} else {
			tmp = readl (&dma->dmactl);
			if (!use_dma_chaining || (tmp & BIT(DMA_ENABLE)) == 0)
				restart_dma (ep);
			else if (ep->is_in && use_dma_chaining) {
				struct net2280_request	*req;
				__le32			dmacount;

				/* the descriptor at the head of the chain
				 * may still have VALID_BIT clear; that's
				 * used to trigger changing DMA_FIFO_VALIDATE
				 * (affects automagic zlp writes).
				 */
				req = list_entry (ep->queue.next,
						struct net2280_request, queue);
				dmacount = req->td->dmacount;
				dmacount &= cpu_to_le32(BIT(VALID_BIT) |
						DMA_BYTE_COUNT_MASK);
				if (dmacount && (dmacount & valid_bit) == 0)
					restart_dma (ep);
			}
		}
		ep->irqs++;
	}

	/* NOTE: there are other PCI errors we might usefully notice.
	 * if they appear very often, here's where to try recovering.
	 */
	if (stat & PCI_ERROR_INTERRUPTS) {
		ERROR (dev, "pci dma error; stat %08x\n", stat);
		stat &= ~PCI_ERROR_INTERRUPTS;
		/* these are fatal errors, but "maybe" they won't
		 * happen again ...
		 */
		stop_activity (dev, dev->driver);
		ep0_start (dev);
		stat = 0;
	}

	if (stat)
		DEBUG (dev, "unhandled irqstat1 %08x\n", stat);
}
3495
7d12e780 3496static irqreturn_t net2280_irq (int irq, void *_dev)
1da177e4
LT
3497{
3498 struct net2280 *dev = _dev;
3499
658ad5e0 3500 /* shared interrupt, not ours */
c2db8a8a 3501 if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY &&
3e76fdcb 3502 (!(readl(&dev->regs->irqstat0) & BIT(INTA_ASSERTED))))
658ad5e0
AS
3503 return IRQ_NONE;
3504
1da177e4
LT
3505 spin_lock (&dev->lock);
3506
3507 /* handle disconnect, dma, and more */
3508 handle_stat1_irqs (dev, readl (&dev->regs->irqstat1));
3509
3510 /* control requests and PIO */
3511 handle_stat0_irqs (dev, readl (&dev->regs->irqstat0));
3512
c2db8a8a 3513 if (dev->pdev->vendor == PCI_VENDOR_ID_PLX) {
adc82f77
RRD
3514 /* re-enable interrupt to trigger any possible new interrupt */
3515 u32 pciirqenb1 = readl(&dev->regs->pciirqenb1);
3516 writel(pciirqenb1 & 0x7FFFFFFF, &dev->regs->pciirqenb1);
3517 writel(pciirqenb1, &dev->regs->pciirqenb1);
3518 }
3519
1da177e4
LT
3520 spin_unlock (&dev->lock);
3521
3522 return IRQ_HANDLED;
3523}
3524
3525/*-------------------------------------------------------------------------*/
3526
/* free the net2280 allocated in net2280_probe(); called when the last
 * reference to the gadget's device is dropped
 */
static void gadget_release(struct device *_dev)
{
	kfree(dev_get_drvdata(_dev));
}
3533
/* tear down the binding between this driver and the pci device */

static void net2280_remove (struct pci_dev *pdev)
{
	struct net2280		*dev = pci_get_drvdata (pdev);

	/* unregister from the UDC core first; no gadget driver may
	 * remain bound past this point
	 */
	usb_del_gadget_udc(&dev->gadget);

	BUG_ON(dev->driver);

	/* then clean up the resources we allocated during probe()
	 * (each step is guarded so this also serves as probe()'s
	 * partial-failure cleanup path)
	 */
	net2280_led_shutdown (dev);
	if (dev->requests) {
		int		i;
		/* dummy descriptors were allocated for ep[1]..ep[4] */
		for (i = 1; i < 5; i++) {
			if (!dev->ep [i].dummy)
				continue;
			pci_pool_free (dev->requests, dev->ep [i].dummy,
					dev->ep [i].td_dma);
		}
		pci_pool_destroy (dev->requests);
	}
	if (dev->got_irq)
		free_irq (pdev->irq, dev);
	/* MSI is only enabled on 338x (PCI_VENDOR_ID_PLX) parts */
	if (use_msi && dev->pdev->vendor == PCI_VENDOR_ID_PLX)
		pci_disable_msi(pdev);
	if (dev->regs)
		iounmap (dev->regs);
	if (dev->region)
		release_mem_region (pci_resource_start (pdev, 0),
				pci_resource_len (pdev, 0));
	if (dev->enabled)
		pci_disable_device (pdev);
	device_remove_file (&pdev->dev, &dev_attr_registers);

	INFO (dev, "unbind\n");
}
3571
/* wrap this driver around the specified device, but
 * don't respond over USB until a gadget driver binds to us.
 */

static int net2280_probe (struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net2280		*dev;
	unsigned long		resource, len;
	void			__iomem *base = NULL;
	int			retval, i;

	/* DMA chaining is meaningless without DMA */
	if (!use_dma)
		use_dma_chaining = 0;

	/* alloc, and start init */
	dev = kzalloc (sizeof *dev, GFP_KERNEL);
	if (dev == NULL){
		retval = -ENOMEM;
		goto done;
	}

	pci_set_drvdata (pdev, dev);
	spin_lock_init (&dev->lock);
	dev->pdev = pdev;
	dev->gadget.ops = &net2280_ops;
	/* 338x (PCI_VENDOR_ID_PLX) is a SuperSpeed part; 228x tops
	 * out at high speed
	 */
	dev->gadget.max_speed = (dev->pdev->vendor == PCI_VENDOR_ID_PLX) ?
				USB_SPEED_SUPER : USB_SPEED_HIGH;

	/* the "gadget" abstracts/virtualizes the controller */
	dev->gadget.name = driver_name;

	/* now all the pci goodies ... */
	if (pci_enable_device (pdev) < 0) {
		retval = -ENODEV;
		goto done;
	}
	dev->enabled = 1;

	/* BAR 0 holds all the registers
	 * BAR 1 is 8051 memory; unused here (note erratum 0103)
	 * BAR 2 is fifo memory; unused here
	 */
	resource = pci_resource_start (pdev, 0);
	len = pci_resource_len (pdev, 0);
	if (!request_mem_region (resource, len, driver_name)) {
		DEBUG (dev, "controller already in use\n");
		retval = -EBUSY;
		goto done;
	}
	dev->region = 1;

	/* FIXME provide firmware download interface to put
	 * 8051 code into the chip, e.g. to turn on PCI PM.
	 */

	base = ioremap_nocache (resource, len);
	if (base == NULL) {
		DEBUG (dev, "can't map memory\n");
		retval = -EFAULT;
		goto done;
	}
	/* register banks live at fixed offsets within BAR 0 */
	dev->regs = (struct net2280_regs __iomem *) base;
	dev->usb = (struct net2280_usb_regs __iomem *) (base + 0x0080);
	dev->pci = (struct net2280_pci_regs __iomem *) (base + 0x0100);
	dev->dma = (struct net2280_dma_regs __iomem *) (base + 0x0180);
	dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200);
	dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300);

	if (dev->pdev->vendor == PCI_VENDOR_ID_PLX) {
		/* 338x-only register banks */
		u32 fsmvalue;
		u32 usbstat;
		dev->usb_ext = (struct usb338x_usb_ext_regs __iomem *)
							(base + 0x00b4);
		dev->fiforegs = (struct usb338x_fifo_regs __iomem *)
							(base + 0x0500);
		dev->llregs = (struct usb338x_ll_regs __iomem *)
							(base + 0x0700);
		dev->ll_lfps_regs = (struct usb338x_ll_lfps_regs __iomem *)
							(base + 0x0748);
		dev->ll_tsn_regs = (struct usb338x_ll_tsn_regs __iomem *)
							(base + 0x077c);
		dev->ll_chicken_reg = (struct usb338x_ll_chi_regs __iomem *)
							(base + 0x079c);
		dev->plregs = (struct usb338x_pl_regs __iomem *)
							(base + 0x0800);
		usbstat = readl(&dev->usb->usbstat);
		dev->enhanced_mode = (usbstat & BIT(11)) ? 1 : 0;
		dev->n_ep = (dev->enhanced_mode) ? 9 : 5;
		/* put into initial config, link up all endpoints */
		fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
					(0xf << DEFECT7374_FSM_FIELD);
		/* See if firmware needs to set up for workaround: */
		if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ)
			writel(0, &dev->usb->usbctl);
	} else{
		dev->enhanced_mode = 0;
		dev->n_ep = 7;
		/* put into initial config, link up all endpoints */
		writel(0, &dev->usb->usbctl);
	}

	usb_reset (dev);
	usb_reinit (dev);

	/* irq setup after old hardware is cleaned up */
	if (!pdev->irq) {
		ERROR (dev, "No IRQ. Check PCI setup!\n");
		retval = -ENODEV;
		goto done;
	}

	/* MSI is best-effort; fall back to the legacy IRQ on failure */
	if (use_msi && dev->pdev->vendor == PCI_VENDOR_ID_PLX)
		if (pci_enable_msi(pdev))
			ERROR(dev, "Failed to enable MSI mode\n");

	if (request_irq (pdev->irq, net2280_irq, IRQF_SHARED, driver_name, dev)
			!= 0) {
		ERROR (dev, "request interrupt %d failed\n", pdev->irq);
		retval = -EBUSY;
		goto done;
	}
	dev->got_irq = 1;

	/* DMA setup */
	/* NOTE: we know only the 32 LSBs of dma addresses may be nonzero */
	dev->requests = pci_pool_create ("requests", pdev,
		sizeof (struct net2280_dma),
		0 /* no alignment requirements */,
		0 /* or page-crossing issues */);
	if (!dev->requests) {
		DEBUG (dev, "can't get request pool\n");
		retval = -ENOMEM;
		goto done;
	}
	/* one dummy descriptor per DMA-capable endpoint ep[1]..ep[4] */
	for (i = 1; i < 5; i++) {
		struct net2280_dma	*td;

		td = pci_pool_alloc (dev->requests, GFP_KERNEL,
				&dev->ep [i].td_dma);
		if (!td) {
			DEBUG (dev, "can't get dummy %d\n", i);
			retval = -ENOMEM;
			goto done;
		}
		td->dmacount = 0;	/* not VALID */
		td->dmadesc = td->dmaaddr;
		dev->ep [i].dummy = td;
	}

	/* enable lower-overhead pci memory bursts during DMA */
	if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
		writel(BIT(DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE) |
			/*
			 * 256 write retries may not be enough...
			   BIT(PCI_RETRY_ABORT_ENABLE) |
			*/
			BIT(DMA_READ_MULTIPLE_ENABLE) |
			BIT(DMA_READ_LINE_ENABLE),
			&dev->pci->pcimstctl);
	/* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */
	pci_set_master (pdev);
	pci_try_set_mwi (pdev);

	/* ... also flushes any posted pci writes */
	dev->chiprev = get_idx_reg (dev->regs, REG_CHIPREV) & 0xffff;

	/* done */
	INFO (dev, "%s\n", driver_desc);
	INFO (dev, "irq %d, pci mem %p, chip rev %04x\n",
			pdev->irq, base, dev->chiprev);
	INFO(dev, "version: " DRIVER_VERSION "; dma %s %s\n",
		use_dma ? (use_dma_chaining ? "chaining" : "enabled")
			: "disabled",
		dev->enhanced_mode ? "enhanced mode" : "legacy mode");
	retval = device_create_file (&pdev->dev, &dev_attr_registers);
	if (retval) goto done;

	retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget,
			gadget_release);
	if (retval)
		goto done;
	return 0;

done:
	/* net2280_remove() checks each dev->* flag, so it doubles as
	 * the cleanup for every partial-init failure above
	 */
	if (dev)
		net2280_remove (pdev);
	return retval;
}
3760
2d61bde7
AS
3761/* make sure the board is quiescent; otherwise it will continue
3762 * generating IRQs across the upcoming reboot.
3763 */
3764
3765static void net2280_shutdown (struct pci_dev *pdev)
3766{
3767 struct net2280 *dev = pci_get_drvdata (pdev);
3768
3769 /* disable IRQs */
3770 writel (0, &dev->regs->pciirqenb0);
3771 writel (0, &dev->regs->pciirqenb1);
3772
3773 /* disable the pullup so the host will think we're gone */
3774 writel (0, &dev->usb->usbctl);
2f076077
AS
3775
3776 /* Disable full-speed test mode */
c2db8a8a 3777 if (dev->pdev->vendor == PCI_VENDOR_ID_PLX_LEGACY)
adc82f77 3778 writel(0, &dev->usb->xcvrdiag);
2d61bde7
AS
3779}
3780
1da177e4
LT
3781
3782/*-------------------------------------------------------------------------*/
3783
901b3d75
DB
/* PCI IDs this driver binds to; all are class "USB device (fe)".
 * 2280/2282 are the legacy high-speed parts, 3380/3382 the USB3 parts.
 */
static const struct pci_device_id pci_ids [] = { {
	.class = 	((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
	.class_mask = 	~0,
	.vendor =	PCI_VENDOR_ID_PLX_LEGACY,
	.device =	0x2280,
	.subvendor =	PCI_ANY_ID,
	.subdevice =	PCI_ANY_ID,
}, {
	.class = 	((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
	.class_mask = 	~0,
	.vendor =	PCI_VENDOR_ID_PLX_LEGACY,
	.device =	0x2282,
	.subvendor =	PCI_ANY_ID,
	.subdevice =	PCI_ANY_ID,
},
	{
	.class = 	((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
	.class_mask = 	~0,
	.vendor =	PCI_VENDOR_ID_PLX,
	.device =	0x3380,
	.subvendor =	PCI_ANY_ID,
	.subdevice =	PCI_ANY_ID,
	},
	{
	.class = 	((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
	.class_mask = 	~0,
	.vendor =	PCI_VENDOR_ID_PLX,
	.device =	0x3382,
	.subvendor =	PCI_ANY_ID,
	.subdevice =	PCI_ANY_ID,
	},
{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE (pci, pci_ids);
3818
3819/* pci driver glue; this is a "new style" PCI driver module */
/* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver net2280_pci_driver = {
	.name =		(char *) driver_name,
	.id_table =	pci_ids,

	.probe =	net2280_probe,
	.remove =	net2280_remove,
	.shutdown =	net2280_shutdown,

	/* FIXME add power management support */
};

/* registers the driver on load, unregisters on unload */
module_pci_driver(net2280_pci_driver);

MODULE_DESCRIPTION (DRIVER_DESC);
MODULE_AUTHOR ("David Brownell");
MODULE_LICENSE ("GPL");