usb: gadget: udc: net2280: Move ASSERT_OUT_NAKING into out_flush
[linux-2.6-block.git] / drivers/usb/gadget/udc/net2280.c
1da177e4
LT
1/*
2 * Driver for the PLX NET2280 USB device controller.
3 * Specs and errata are available from <http://www.plxtech.com>.
4 *
901b3d75 5 * PLX Technology Inc. (formerly NetChip Technology) supported the
1da177e4
LT
6 * development of this driver.
7 *
8 *
9 * CODE STATUS HIGHLIGHTS
10 *
11 * This driver should work well with most "gadget" drivers, including
fa06920a 12 * the Mass Storage, Serial, and Ethernet/RNDIS gadget drivers
1da177e4
LT
13 * as well as Gadget Zero and Gadgetfs.
14 *
90664198 15 * DMA is enabled by default.
1da177e4 16 *
adc82f77
RRD
17 * MSI is enabled by default. The legacy IRQ is used if MSI couldn't
18 * be enabled.
19 *
1da177e4
LT
20 * Note that almost all the errata workarounds here are only needed for
21 * rev1 chips. Rev1a silicon (0110) fixes almost all of them.
22 */
23
24/*
25 * Copyright (C) 2003 David Brownell
26 * Copyright (C) 2003-2005 PLX Technology, Inc.
adc82f77 27 * Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS
1da177e4 28 *
901b3d75
DB
29 * Modified Seth Levy 2005 PLX Technology, Inc. to provide compatibility
30 * with 2282 chip
950ee4c8 31 *
adc82f77
RRD
32 * Modified Ricardo Ribalda Qtechnology AS to provide compatibility
33 * with usb 338x chip. Based on PLX driver
34 *
1da177e4
LT
35 * This program is free software; you can redistribute it and/or modify
36 * it under the terms of the GNU General Public License as published by
37 * the Free Software Foundation; either version 2 of the License, or
38 * (at your option) any later version.
1da177e4
LT
39 */
40
1da177e4
LT
41#include <linux/module.h>
42#include <linux/pci.h>
682d4c80 43#include <linux/dma-mapping.h>
1da177e4
LT
44#include <linux/kernel.h>
45#include <linux/delay.h>
46#include <linux/ioport.h>
1da177e4 47#include <linux/slab.h>
1da177e4
LT
48#include <linux/errno.h>
49#include <linux/init.h>
50#include <linux/timer.h>
51#include <linux/list.h>
52#include <linux/interrupt.h>
53#include <linux/moduleparam.h>
54#include <linux/device.h>
5f848137 55#include <linux/usb/ch9.h>
9454a57a 56#include <linux/usb/gadget.h>
b38b03b3 57#include <linux/prefetch.h>
fae3c158 58#include <linux/io.h>
1da177e4
LT
59
60#include <asm/byteorder.h>
1da177e4 61#include <asm/irq.h>
1da177e4
LT
62#include <asm/unaligned.h>
63
adc82f77
RRD
64#define DRIVER_DESC "PLX NET228x/USB338x USB Peripheral Controller"
65#define DRIVER_VERSION "2005 Sept 27/v3.0"
1da177e4 66
1da177e4
LT
67#define EP_DONTUSE 13 /* nonzero */
68
69#define USE_RDK_LEDS /* GPIO pins control three LEDs */
70
71
fae3c158
RRD
72static const char driver_name[] = "net2280";
73static const char driver_desc[] = DRIVER_DESC;
1da177e4 74
adc82f77 75static const u32 ep_bit[9] = { 0, 17, 2, 19, 4, 1, 18, 3, 20 };
fae3c158
RRD
76static const char ep0name[] = "ep0";
77static const char *const ep_name[] = {
1da177e4
LT
78 ep0name,
79 "ep-a", "ep-b", "ep-c", "ep-d",
adc82f77 80 "ep-e", "ep-f", "ep-g", "ep-h",
1da177e4
LT
81};
82
1da177e4
LT
83/* mode 0 == ep-{a,b,c,d} 1K fifo each
84 * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
85 * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
86 */
fae3c158 87static ushort fifo_mode;
1da177e4
LT
88
89/* "modprobe net2280 fifo_mode=1" etc */
ae8e530a 90module_param(fifo_mode, ushort, 0644);
1da177e4
LT
91
92/* enable_suspend -- When enabled, the driver will respond to
93 * USB suspend requests by powering down the NET2280. Otherwise,
25985edc 94 * USB suspend requests will be ignored. This is acceptable for
950ee4c8 95 * self-powered devices
1da177e4 96 */
00d4db0e 97static bool enable_suspend;
1da177e4
LT
98
99/* "modprobe net2280 enable_suspend=1" etc */
ae8e530a 100module_param(enable_suspend, bool, 0444);
1da177e4 101
1da177e4
LT
102#define DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")
103
fae3c158 104static char *type_string(u8 bmAttributes)
1da177e4
LT
105{
106 switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
107 case USB_ENDPOINT_XFER_BULK: return "bulk";
108 case USB_ENDPOINT_XFER_ISOC: return "iso";
109 case USB_ENDPOINT_XFER_INT: return "intr";
2b84f92b 110 }
1da177e4
LT
111 return "control";
112}
1da177e4
LT
113
114#include "net2280.h"
115
3e76fdcb
RRD
116#define valid_bit cpu_to_le32(BIT(VALID_BIT))
117#define dma_done_ie cpu_to_le32(BIT(DMA_DONE_INTERRUPT_ENABLE))
1da177e4
LT
118
119/*-------------------------------------------------------------------------*/
adc82f77
RRD
120static inline void enable_pciirqenb(struct net2280_ep *ep)
121{
122 u32 tmp = readl(&ep->dev->regs->pciirqenb0);
123
2eeb0016 124 if (ep->dev->quirks & PLX_LEGACY)
3e76fdcb 125 tmp |= BIT(ep->num);
adc82f77 126 else
3e76fdcb 127 tmp |= BIT(ep_bit[ep->num]);
adc82f77
RRD
128 writel(tmp, &ep->dev->regs->pciirqenb0);
129
130 return;
131}
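/* Summary of the quirk split above: legacy 228x parts enable the
 * per-endpoint interrupt with bit ep->num in pciirqenb0, while the
 * 338x scatters those enable bits across the register, which is what
 * the ep_bit[] lookup table near the top of this file encodes.
 */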
1da177e4
LT
132
133static int
fae3c158 134net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
1da177e4
LT
135{
136 struct net2280 *dev;
137 struct net2280_ep *ep;
138 u32 max, tmp;
139 unsigned long flags;
adc82f77 140 static const u32 ep_key[9] = { 1, 0, 1, 0, 1, 1, 0, 1, 0 };
1da177e4 141
fae3c158 142 ep = container_of(_ep, struct net2280_ep, ep);
ae8e530a
RRD
143 if (!_ep || !desc || ep->desc || _ep->name == ep0name ||
144 desc->bDescriptorType != USB_DT_ENDPOINT)
1da177e4
LT
145 return -EINVAL;
146 dev = ep->dev;
147 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
148 return -ESHUTDOWN;
149
150 /* erratum 0119 workaround ties up an endpoint number */
151 if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE)
152 return -EDOM;
153
2eeb0016 154 if (dev->quirks & PLX_SUPERSPEED) {
adc82f77
RRD
155 if ((desc->bEndpointAddress & 0x0f) >= 0x0c)
156 return -EDOM;
157 ep->is_in = !!usb_endpoint_dir_in(desc);
158 if (dev->enhanced_mode && ep->is_in && ep_key[ep->num])
159 return -EINVAL;
160 }
161
1da177e4 162 /* sanity check ep-e/ep-f since their fifos are small */
fae3c158 163 max = usb_endpoint_maxp(desc) & 0x1fff;
2eeb0016 164 if (ep->num > 4 && max > 64 && (dev->quirks & PLX_LEGACY))
1da177e4
LT
165 return -ERANGE;
166
fae3c158 167 spin_lock_irqsave(&dev->lock, flags);
1da177e4
LT
168 _ep->maxpacket = max & 0x7ff;
169 ep->desc = desc;
170
171 /* ep_reset() has already been called */
172 ep->stopped = 0;
8066134f 173 ep->wedged = 0;
1da177e4
LT
174 ep->out_overflow = 0;
175
176 /* set speed-dependent max packet; may kick in high bandwidth */
adc82f77 177 set_max_speed(ep, max);
1da177e4 178
1da177e4 179 /* set type, direction, address; reset fifo counters */
3e76fdcb 180 writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
1da177e4
LT
181 tmp = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
182 if (tmp == USB_ENDPOINT_XFER_INT) {
183 /* erratum 0105 workaround prevents hs NYET */
ae8e530a
RRD
184 if (dev->chiprev == 0100 &&
185 dev->gadget.speed == USB_SPEED_HIGH &&
186 !(desc->bEndpointAddress & USB_DIR_IN))
3e76fdcb 187 writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE),
1da177e4
LT
188 &ep->regs->ep_rsp);
189 } else if (tmp == USB_ENDPOINT_XFER_BULK) {
190 /* catch some particularly blatant driver bugs */
adc82f77
RRD
191 if ((dev->gadget.speed == USB_SPEED_SUPER && max != 1024) ||
192 (dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
193 (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
194 spin_unlock_irqrestore(&dev->lock, flags);
1da177e4
LT
195 return -ERANGE;
196 }
197 }
fae3c158 198 ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC);
adc82f77 199 /* Enable this endpoint */
2eeb0016 200 if (dev->quirks & PLX_LEGACY) {
adc82f77
RRD
201 tmp <<= ENDPOINT_TYPE;
202 tmp |= desc->bEndpointAddress;
203 /* default full fifo lines */
204 tmp |= (4 << ENDPOINT_BYTE_COUNT);
3e76fdcb 205 tmp |= BIT(ENDPOINT_ENABLE);
adc82f77
RRD
206 ep->is_in = (tmp & USB_DIR_IN) != 0;
207 } else {
208 /* In Legacy mode, only OUT endpoints are used */
209 if (dev->enhanced_mode && ep->is_in) {
210 tmp <<= IN_ENDPOINT_TYPE;
3e76fdcb 211 tmp |= BIT(IN_ENDPOINT_ENABLE);
adc82f77 212 /* Not applicable to Legacy */
3e76fdcb 213 tmp |= BIT(ENDPOINT_DIRECTION);
adc82f77
RRD
214 } else {
215 tmp <<= OUT_ENDPOINT_TYPE;
3e76fdcb 216 tmp |= BIT(OUT_ENDPOINT_ENABLE);
adc82f77
RRD
217 tmp |= (ep->is_in << ENDPOINT_DIRECTION);
218 }
219
220 tmp |= usb_endpoint_num(desc);
221 tmp |= (ep->ep.maxburst << MAX_BURST_SIZE);
222 }
223
224 /* Make sure all the registers are written before ep_rsp*/
225 wmb();
1da177e4
LT
226
227 /* for OUT transfers, block the rx fifo until a read is posted */
1da177e4 228 if (!ep->is_in)
3e76fdcb 229 writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
2eeb0016 230 else if (!(dev->quirks & PLX_2280)) {
901b3d75
DB
 231 /* Added for 2282: don't use NAK packets on an IN endpoint;
 232 * this was ignored on the 2280
233 */
3e76fdcb
RRD
234 writel(BIT(CLEAR_NAK_OUT_PACKETS) |
235 BIT(CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp);
950ee4c8 236 }
1da177e4 237
adc82f77 238 writel(tmp, &ep->cfg->ep_cfg);
1da177e4
LT
239
240 /* enable irqs */
241 if (!ep->dma) { /* pio, per-packet */
adc82f77 242 enable_pciirqenb(ep);
1da177e4 243
3e76fdcb
RRD
244 tmp = BIT(DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) |
245 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE);
2eeb0016 246 if (dev->quirks & PLX_2280)
fae3c158
RRD
247 tmp |= readl(&ep->regs->ep_irqenb);
248 writel(tmp, &ep->regs->ep_irqenb);
1da177e4 249 } else { /* dma, per-request */
3e76fdcb 250 tmp = BIT((8 + ep->num)); /* completion */
fae3c158
RRD
251 tmp |= readl(&dev->regs->pciirqenb1);
252 writel(tmp, &dev->regs->pciirqenb1);
1da177e4
LT
253
254 /* for short OUT transfers, dma completions can't
255 * advance the queue; do it pio-style, by hand.
256 * NOTE erratum 0112 workaround #2
257 */
258 if ((desc->bEndpointAddress & USB_DIR_IN) == 0) {
3e76fdcb 259 tmp = BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE);
fae3c158 260 writel(tmp, &ep->regs->ep_irqenb);
1da177e4 261
adc82f77 262 enable_pciirqenb(ep);
1da177e4
LT
263 }
264 }
265
266 tmp = desc->bEndpointAddress;
e56e69cc 267 ep_dbg(dev, "enabled %s (ep%d%s-%s) %s max %04x\n",
fae3c158
RRD
268 _ep->name, tmp & 0x0f, DIR_STRING(tmp),
269 type_string(desc->bmAttributes),
1da177e4
LT
270 ep->dma ? "dma" : "pio", max);
271
272 /* pci writes may still be posted */
fae3c158 273 spin_unlock_irqrestore(&dev->lock, flags);
1da177e4
LT
274 return 0;
275}
276
fae3c158 277static int handshake(u32 __iomem *ptr, u32 mask, u32 done, int usec)
1da177e4
LT
278{
279 u32 result;
280
281 do {
fae3c158 282 result = readl(ptr);
1da177e4
LT
283 if (result == ~(u32)0) /* "device unplugged" */
284 return -ENODEV;
285 result &= mask;
286 if (result == done)
287 return 0;
fae3c158 288 udelay(1);
1da177e4
LT
289 usec--;
290 } while (usec > 0);
291 return -ETIMEDOUT;
292}
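/* Usage sketch (mirrors spin_stop_dma() further down): handshake()
 * polls *ptr until (readl(ptr) & mask) == done, gives up after about
 * 'usec' microseconds, and treats an all-ones readback as a
 * surprise-removed PCI device.  For example:
 *
 *	if (handshake(&dma->dmactl, BIT(DMA_ENABLE), 0, 50) < 0)
 *		ep_dbg(ep->dev, "dma never stopped\n");
 */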
293
901b3d75 294static const struct usb_ep_ops net2280_ep_ops;
1da177e4 295
adc82f77
RRD
296static void ep_reset_228x(struct net2280_regs __iomem *regs,
297 struct net2280_ep *ep)
1da177e4
LT
298{
299 u32 tmp;
300
301 ep->desc = NULL;
fae3c158 302 INIT_LIST_HEAD(&ep->queue);
1da177e4 303
e117e742 304 usb_ep_set_maxpacket_limit(&ep->ep, ~0);
1da177e4
LT
305 ep->ep.ops = &net2280_ep_ops;
306
307 /* disable the dma, irqs, endpoint... */
308 if (ep->dma) {
fae3c158 309 writel(0, &ep->dma->dmactl);
3e76fdcb
RRD
310 writel(BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
311 BIT(DMA_TRANSACTION_DONE_INTERRUPT) |
312 BIT(DMA_ABORT),
313 &ep->dma->dmastat);
1da177e4 314
fae3c158 315 tmp = readl(&regs->pciirqenb0);
3e76fdcb 316 tmp &= ~BIT(ep->num);
fae3c158 317 writel(tmp, &regs->pciirqenb0);
1da177e4 318 } else {
fae3c158 319 tmp = readl(&regs->pciirqenb1);
3e76fdcb 320 tmp &= ~BIT((8 + ep->num)); /* completion */
fae3c158 321 writel(tmp, &regs->pciirqenb1);
1da177e4 322 }
fae3c158 323 writel(0, &ep->regs->ep_irqenb);
1da177e4
LT
324
325 /* init to our chosen defaults, notably so that we NAK OUT
326 * packets until the driver queues a read (+note erratum 0112)
327 */
2eeb0016 328 if (!ep->is_in || (ep->dev->quirks & PLX_2280)) {
3e76fdcb
RRD
329 tmp = BIT(SET_NAK_OUT_PACKETS_MODE) |
330 BIT(SET_NAK_OUT_PACKETS) |
331 BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
332 BIT(CLEAR_INTERRUPT_MODE);
950ee4c8
GL
333 } else {
334 /* added for 2282 */
3e76fdcb
RRD
335 tmp = BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
336 BIT(CLEAR_NAK_OUT_PACKETS) |
337 BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
338 BIT(CLEAR_INTERRUPT_MODE);
950ee4c8 339 }
1da177e4
LT
340
341 if (ep->num != 0) {
3e76fdcb
RRD
342 tmp |= BIT(CLEAR_ENDPOINT_TOGGLE) |
343 BIT(CLEAR_ENDPOINT_HALT);
1da177e4 344 }
fae3c158 345 writel(tmp, &ep->regs->ep_rsp);
1da177e4
LT
346
347 /* scrub most status bits, and flush any fifo state */
2eeb0016 348 if (ep->dev->quirks & PLX_2280)
3e76fdcb
RRD
349 tmp = BIT(FIFO_OVERFLOW) |
350 BIT(FIFO_UNDERFLOW);
950ee4c8
GL
351 else
352 tmp = 0;
353
3e76fdcb
RRD
354 writel(tmp | BIT(TIMEOUT) |
355 BIT(USB_STALL_SENT) |
356 BIT(USB_IN_NAK_SENT) |
357 BIT(USB_IN_ACK_RCVD) |
358 BIT(USB_OUT_PING_NAK_SENT) |
359 BIT(USB_OUT_ACK_SENT) |
360 BIT(FIFO_FLUSH) |
361 BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
362 BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
363 BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
364 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
365 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
ae8e530a
RRD
366 BIT(DATA_IN_TOKEN_INTERRUPT),
367 &ep->regs->ep_stat);
1da177e4
LT
368
369 /* fifo size is handled separately */
370}
371
adc82f77
RRD
372static void ep_reset_338x(struct net2280_regs __iomem *regs,
373 struct net2280_ep *ep)
374{
375 u32 tmp, dmastat;
376
377 ep->desc = NULL;
378 INIT_LIST_HEAD(&ep->queue);
379
380 usb_ep_set_maxpacket_limit(&ep->ep, ~0);
381 ep->ep.ops = &net2280_ep_ops;
382
383 /* disable the dma, irqs, endpoint... */
384 if (ep->dma) {
385 writel(0, &ep->dma->dmactl);
3e76fdcb
RRD
386 writel(BIT(DMA_ABORT_DONE_INTERRUPT) |
387 BIT(DMA_PAUSE_DONE_INTERRUPT) |
388 BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
ae8e530a
RRD
389 BIT(DMA_TRANSACTION_DONE_INTERRUPT),
390 /* | BIT(DMA_ABORT), */
391 &ep->dma->dmastat);
adc82f77
RRD
392
393 dmastat = readl(&ep->dma->dmastat);
394 if (dmastat == 0x5002) {
e56e69cc 395 ep_warn(ep->dev, "The dmastat return = %x!!\n",
adc82f77
RRD
396 dmastat);
397 writel(0x5a, &ep->dma->dmastat);
398 }
399
400 tmp = readl(&regs->pciirqenb0);
3e76fdcb 401 tmp &= ~BIT(ep_bit[ep->num]);
adc82f77
RRD
402 writel(tmp, &regs->pciirqenb0);
403 } else {
404 if (ep->num < 5) {
405 tmp = readl(&regs->pciirqenb1);
3e76fdcb 406 tmp &= ~BIT((8 + ep->num)); /* completion */
adc82f77
RRD
407 writel(tmp, &regs->pciirqenb1);
408 }
409 }
410 writel(0, &ep->regs->ep_irqenb);
411
3e76fdcb
RRD
412 writel(BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
413 BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
414 BIT(FIFO_OVERFLOW) |
415 BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
416 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
417 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
418 BIT(DATA_IN_TOKEN_INTERRUPT), &ep->regs->ep_stat);
adc82f77
RRD
419}
420
fae3c158 421static void nuke(struct net2280_ep *);
1da177e4 422
fae3c158 423static int net2280_disable(struct usb_ep *_ep)
1da177e4
LT
424{
425 struct net2280_ep *ep;
426 unsigned long flags;
427
fae3c158 428 ep = container_of(_ep, struct net2280_ep, ep);
1da177e4
LT
429 if (!_ep || !ep->desc || _ep->name == ep0name)
430 return -EINVAL;
431
fae3c158
RRD
432 spin_lock_irqsave(&ep->dev->lock, flags);
433 nuke(ep);
adc82f77 434
2eeb0016 435 if (ep->dev->quirks & PLX_SUPERSPEED)
adc82f77
RRD
436 ep_reset_338x(ep->dev->regs, ep);
437 else
438 ep_reset_228x(ep->dev->regs, ep);
1da177e4 439
e56e69cc 440 ep_vdbg(ep->dev, "disabled %s %s\n",
1da177e4
LT
441 ep->dma ? "dma" : "pio", _ep->name);
442
443 /* synch memory views with the device */
adc82f77 444 (void)readl(&ep->cfg->ep_cfg);
1da177e4 445
d588ff58 446 if (!ep->dma && ep->num >= 1 && ep->num <= 4)
fae3c158 447 ep->dma = &ep->dev->dma[ep->num - 1];
1da177e4 448
fae3c158 449 spin_unlock_irqrestore(&ep->dev->lock, flags);
1da177e4
LT
450 return 0;
451}
452
453/*-------------------------------------------------------------------------*/
454
fae3c158
RRD
455static struct usb_request
456*net2280_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
1da177e4
LT
457{
458 struct net2280_ep *ep;
459 struct net2280_request *req;
460
461 if (!_ep)
462 return NULL;
fae3c158 463 ep = container_of(_ep, struct net2280_ep, ep);
1da177e4 464
7039f422 465 req = kzalloc(sizeof(*req), gfp_flags);
1da177e4
LT
466 if (!req)
467 return NULL;
468
fae3c158 469 INIT_LIST_HEAD(&req->queue);
1da177e4
LT
470
471 /* this dma descriptor may be swapped with the previous dummy */
472 if (ep->dma) {
473 struct net2280_dma *td;
474
fae3c158 475 td = pci_pool_alloc(ep->dev->requests, gfp_flags,
1da177e4
LT
476 &req->td_dma);
477 if (!td) {
fae3c158 478 kfree(req);
1da177e4
LT
479 return NULL;
480 }
481 td->dmacount = 0; /* not VALID */
1da177e4
LT
482 td->dmadesc = td->dmaaddr;
483 req->td = td;
484 }
485 return &req->req;
486}
487
fae3c158 488static void net2280_free_request(struct usb_ep *_ep, struct usb_request *_req)
1da177e4
LT
489{
490 struct net2280_ep *ep;
491 struct net2280_request *req;
492
fae3c158 493 ep = container_of(_ep, struct net2280_ep, ep);
1da177e4
LT
494 if (!_ep || !_req)
495 return;
496
fae3c158
RRD
497 req = container_of(_req, struct net2280_request, req);
498 WARN_ON(!list_empty(&req->queue));
1da177e4 499 if (req->td)
fae3c158
RRD
500 pci_pool_free(ep->dev->requests, req->td, req->td_dma);
501 kfree(req);
1da177e4
LT
502}
503
504/*-------------------------------------------------------------------------*/
505
1da177e4
LT
506/* load a packet into the fifo we use for usb IN transfers.
507 * works for all endpoints.
508 *
509 * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo
510 * at a time, but this code is simpler because it knows it only writes
511 * one packet. ep-a..ep-d should use dma instead.
512 */
fae3c158 513static void write_fifo(struct net2280_ep *ep, struct usb_request *req)
1da177e4
LT
514{
515 struct net2280_ep_regs __iomem *regs = ep->regs;
516 u8 *buf;
517 u32 tmp;
518 unsigned count, total;
519
520 /* INVARIANT: fifo is currently empty. (testable) */
521
522 if (req) {
523 buf = req->buf + req->actual;
fae3c158 524 prefetch(buf);
1da177e4
LT
525 total = req->length - req->actual;
526 } else {
527 total = 0;
528 buf = NULL;
529 }
530
531 /* write just one packet at a time */
532 count = ep->ep.maxpacket;
533 if (count > total) /* min() cannot be used on a bitfield */
534 count = total;
535
e56e69cc 536 ep_vdbg(ep->dev, "write %s fifo (IN) %d bytes%s req %p\n",
1da177e4
LT
537 ep->ep.name, count,
538 (count != ep->ep.maxpacket) ? " (short)" : "",
539 req);
540 while (count >= 4) {
541 /* NOTE be careful if you try to align these. fifo lines
542 * should normally be full (4 bytes) and successive partial
543 * lines are ok only in certain cases.
544 */
fae3c158
RRD
545 tmp = get_unaligned((u32 *)buf);
546 cpu_to_le32s(&tmp);
547 writel(tmp, &regs->ep_data);
1da177e4
LT
548 buf += 4;
549 count -= 4;
550 }
551
552 /* last fifo entry is "short" unless we wrote a full packet.
553 * also explicitly validate last word in (periodic) transfers
554 * when maxpacket is not a multiple of 4 bytes.
555 */
556 if (count || total < ep->ep.maxpacket) {
fae3c158
RRD
557 tmp = count ? get_unaligned((u32 *)buf) : count;
558 cpu_to_le32s(&tmp);
559 set_fifo_bytecount(ep, count & 0x03);
560 writel(tmp, &regs->ep_data);
1da177e4
LT
561 }
562
563 /* pci writes may still be posted */
564}
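/* Worked example with hypothetical sizes: a 10-byte IN transfer on a
 * 64-byte bulk endpoint takes two full 32-bit writes to ep_data in the
 * loop above, then the tail path calls set_fifo_bytecount(ep, 2) and
 * writes one last partial word, so the chip transmits a 10-byte short
 * packet.
 */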
565
566/* work around erratum 0106: PCI and USB race over the OUT fifo.
567 * caller guarantees chiprev 0100, out endpoint is NAKing, and
568 * there's no real data in the fifo.
569 *
570 * NOTE: also used in cases where that erratum doesn't apply:
571 * where the host wrote "too much" data to us.
572 */
fae3c158 573static void out_flush(struct net2280_ep *ep)
1da177e4
LT
574{
575 u32 __iomem *statp;
576 u32 tmp;
577
1da177e4 578 statp = &ep->regs->ep_stat;
d82f3db2
RRD
579
580 tmp = readl(statp);
581 if (tmp & BIT(NAK_OUT_PACKETS)) {
582 ep_dbg(ep->dev, "%s %s %08x !NAK\n",
583 ep->ep.name, __func__, tmp);
584 writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
585 }
586
3e76fdcb 587 writel(BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
ae8e530a
RRD
588 BIT(DATA_PACKET_RECEIVED_INTERRUPT),
589 statp);
3e76fdcb 590 writel(BIT(FIFO_FLUSH), statp);
fae3c158
RRD
 591 /* Make sure that statp is written */
592 mb();
593 tmp = readl(statp);
ae8e530a 594 if (tmp & BIT(DATA_OUT_PING_TOKEN_INTERRUPT) &&
1da177e4 595 /* high speed did bulk NYET; fifo isn't filling */
ae8e530a 596 ep->dev->gadget.speed == USB_SPEED_FULL) {
1da177e4
LT
597 unsigned usec;
598
599 usec = 50; /* 64 byte bulk/interrupt */
3e76fdcb
RRD
600 handshake(statp, BIT(USB_OUT_PING_NAK_SENT),
601 BIT(USB_OUT_PING_NAK_SENT), usec);
1da177e4
LT
602 /* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */
603 }
604}
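/* The NAK check at the top of this function is the former
 * ASSERT_OUT_NAKING() helper folded directly into out_flush() (the
 * change this history view is titled after), so every flush starts by
 * sampling ep_stat and, depending on the NAK_OUT_PACKETS state,
 * forcing SET_NAK_OUT_PACKETS before the FIFO is flushed.
 */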
605
606/* unload packet(s) from the fifo we use for usb OUT transfers.
607 * returns true iff the request completed, because of short packet
608 * or the request buffer having filled with full packets.
609 *
610 * for ep-a..ep-d this will read multiple packets out when they
611 * have been accepted.
612 */
fae3c158 613static int read_fifo(struct net2280_ep *ep, struct net2280_request *req)
1da177e4
LT
614{
615 struct net2280_ep_regs __iomem *regs = ep->regs;
616 u8 *buf = req->req.buf + req->req.actual;
617 unsigned count, tmp, is_short;
618 unsigned cleanup = 0, prevent = 0;
619
620 /* erratum 0106 ... packets coming in during fifo reads might
621 * be incompletely rejected. not all cases have workarounds.
622 */
ae8e530a
RRD
623 if (ep->dev->chiprev == 0x0100 &&
624 ep->dev->gadget.speed == USB_SPEED_FULL) {
fae3c158
RRD
625 udelay(1);
626 tmp = readl(&ep->regs->ep_stat);
3e76fdcb 627 if ((tmp & BIT(NAK_OUT_PACKETS)))
1da177e4 628 cleanup = 1;
3e76fdcb 629 else if ((tmp & BIT(FIFO_FULL))) {
fae3c158 630 start_out_naking(ep);
1da177e4
LT
631 prevent = 1;
632 }
633 /* else: hope we don't see the problem */
634 }
635
636 /* never overflow the rx buffer. the fifo reads packets until
637 * it sees a short one; we might not be ready for them all.
638 */
fae3c158
RRD
639 prefetchw(buf);
640 count = readl(&regs->ep_avail);
641 if (unlikely(count == 0)) {
642 udelay(1);
643 tmp = readl(&ep->regs->ep_stat);
644 count = readl(&regs->ep_avail);
1da177e4 645 /* handled that data already? */
3e76fdcb 646 if (count == 0 && (tmp & BIT(NAK_OUT_PACKETS)) == 0)
1da177e4
LT
647 return 0;
648 }
649
650 tmp = req->req.length - req->req.actual;
651 if (count > tmp) {
652 /* as with DMA, data overflow gets flushed */
653 if ((tmp % ep->ep.maxpacket) != 0) {
e56e69cc 654 ep_err(ep->dev,
1da177e4
LT
655 "%s out fifo %d bytes, expected %d\n",
656 ep->ep.name, count, tmp);
657 req->req.status = -EOVERFLOW;
658 cleanup = 1;
659 /* NAK_OUT_PACKETS will be set, so flushing is safe;
660 * the next read will start with the next packet
661 */
662 } /* else it's a ZLP, no worries */
663 count = tmp;
664 }
665 req->req.actual += count;
666
667 is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0);
668
e56e69cc 669 ep_vdbg(ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n",
1da177e4
LT
670 ep->ep.name, count, is_short ? " (short)" : "",
671 cleanup ? " flush" : "", prevent ? " nak" : "",
672 req, req->req.actual, req->req.length);
673
674 while (count >= 4) {
fae3c158
RRD
675 tmp = readl(&regs->ep_data);
676 cpu_to_le32s(&tmp);
677 put_unaligned(tmp, (u32 *)buf);
1da177e4
LT
678 buf += 4;
679 count -= 4;
680 }
681 if (count) {
fae3c158 682 tmp = readl(&regs->ep_data);
1da177e4
LT
683 /* LE conversion is implicit here: */
684 do {
685 *buf++ = (u8) tmp;
686 tmp >>= 8;
687 } while (--count);
688 }
689 if (cleanup)
fae3c158 690 out_flush(ep);
1da177e4 691 if (prevent) {
3e76fdcb 692 writel(BIT(CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
fae3c158 693 (void) readl(&ep->regs->ep_rsp);
1da177e4
LT
694 }
695
ae8e530a
RRD
696 return is_short || ((req->req.actual == req->req.length) &&
697 !req->req.zero);
1da177e4
LT
698}
699
700/* fill out dma descriptor to match a given request */
fae3c158
RRD
701static void fill_dma_desc(struct net2280_ep *ep,
702 struct net2280_request *req, int valid)
1da177e4
LT
703{
704 struct net2280_dma *td = req->td;
705 u32 dmacount = req->req.length;
706
707 /* don't let DMA continue after a short OUT packet,
708 * so overruns can't affect the next transfer.
709 * in case of overruns on max-size packets, we can't
710 * stop the fifo from filling but we can flush it.
711 */
712 if (ep->is_in)
3e76fdcb 713 dmacount |= BIT(DMA_DIRECTION);
ae8e530a 714 if ((!ep->is_in && (dmacount % ep->ep.maxpacket) != 0) ||
2eeb0016 715 !(ep->dev->quirks & PLX_2280))
3e76fdcb 716 dmacount |= BIT(END_OF_CHAIN);
1da177e4
LT
717
718 req->valid = valid;
719 if (valid)
3e76fdcb 720 dmacount |= BIT(VALID_BIT);
90664198 721 dmacount |= BIT(DMA_DONE_INTERRUPT_ENABLE);
1da177e4
LT
722
723 /* td->dmadesc = previously set by caller */
724 td->dmaaddr = cpu_to_le32 (req->req.dma);
725
726 /* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
fae3c158 727 wmb();
da2bbdcc 728 td->dmacount = cpu_to_le32(dmacount);
1da177e4
LT
729}
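/* Shape of the dmacount word built above: the byte count sits in the
 * low bits (extracted elsewhere with DMA_BYTE_COUNT_MASK), and
 * DMA_DIRECTION, END_OF_CHAIN, VALID_BIT and DMA_DONE_INTERRUPT_ENABLE
 * are OR'd in as control flags before the descriptor is exposed to the
 * engine via the final, write-barrier-protected dmacount store.
 */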
730
731static const u32 dmactl_default =
3e76fdcb
RRD
732 BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
733 BIT(DMA_CLEAR_COUNT_ENABLE) |
1da177e4 734 /* erratum 0116 workaround part 1 (use POLLING) */
3e76fdcb
RRD
735 (POLL_100_USEC << DESCRIPTOR_POLLING_RATE) |
736 BIT(DMA_VALID_BIT_POLLING_ENABLE) |
737 BIT(DMA_VALID_BIT_ENABLE) |
738 BIT(DMA_SCATTER_GATHER_ENABLE) |
1da177e4 739 /* erratum 0116 workaround part 2 (no AUTOSTART) */
3e76fdcb 740 BIT(DMA_ENABLE);
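/* Erratum 0116 workaround, parts 1 and 2, are baked into these default
 * flags (descriptor polling on, no autostart); part 3 is the pcimstctl
 * read in start_queue() below, which pulls the PCI arbiter away from
 * the net2280 before DMA_START is written.
 */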
1da177e4 741
fae3c158 742static inline void spin_stop_dma(struct net2280_dma_regs __iomem *dma)
1da177e4 743{
3e76fdcb 744 handshake(&dma->dmactl, BIT(DMA_ENABLE), 0, 50);
1da177e4
LT
745}
746
fae3c158 747static inline void stop_dma(struct net2280_dma_regs __iomem *dma)
1da177e4 748{
3e76fdcb 749 writel(readl(&dma->dmactl) & ~BIT(DMA_ENABLE), &dma->dmactl);
fae3c158 750 spin_stop_dma(dma);
1da177e4
LT
751}
752
fae3c158 753static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
1da177e4
LT
754{
755 struct net2280_dma_regs __iomem *dma = ep->dma;
3e76fdcb 756 unsigned int tmp = BIT(VALID_BIT) | (ep->is_in << DMA_DIRECTION);
1da177e4 757
2eeb0016 758 if (!(ep->dev->quirks & PLX_2280))
3e76fdcb 759 tmp |= BIT(END_OF_CHAIN);
950ee4c8 760
fae3c158
RRD
761 writel(tmp, &dma->dmacount);
762 writel(readl(&dma->dmastat), &dma->dmastat);
1da177e4 763
fae3c158 764 writel(td_dma, &dma->dmadesc);
2eeb0016 765 if (ep->dev->quirks & PLX_SUPERSPEED)
3e76fdcb 766 dmactl |= BIT(DMA_REQUEST_OUTSTANDING);
fae3c158 767 writel(dmactl, &dma->dmactl);
1da177e4
LT
768
769 /* erratum 0116 workaround part 3: pci arbiter away from net2280 */
fae3c158 770 (void) readl(&ep->dev->pci->pcimstctl);
1da177e4 771
3e76fdcb 772 writel(BIT(DMA_START), &dma->dmastat);
1da177e4
LT
773
774 if (!ep->is_in)
fae3c158 775 stop_out_naking(ep);
1da177e4
LT
776}
777
fae3c158 778static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
1da177e4
LT
779{
780 u32 tmp;
781 struct net2280_dma_regs __iomem *dma = ep->dma;
782
783 /* FIXME can't use DMA for ZLPs */
784
785 /* on this path we "know" there's no dma active (yet) */
3e76fdcb 786 WARN_ON(readl(&dma->dmactl) & BIT(DMA_ENABLE));
fae3c158 787 writel(0, &ep->dma->dmactl);
1da177e4
LT
788
789 /* previous OUT packet might have been short */
fae3c158
RRD
790 if (!ep->is_in && (readl(&ep->regs->ep_stat) &
791 BIT(NAK_OUT_PACKETS))) {
3e76fdcb 792 writel(BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT),
1da177e4
LT
793 &ep->regs->ep_stat);
794
fae3c158 795 tmp = readl(&ep->regs->ep_avail);
1da177e4 796 if (tmp) {
fae3c158 797 writel(readl(&dma->dmastat), &dma->dmastat);
1da177e4
LT
798
799 /* transfer all/some fifo data */
fae3c158
RRD
800 writel(req->req.dma, &dma->dmaaddr);
801 tmp = min(tmp, req->req.length);
1da177e4
LT
802
803 /* dma irq, faking scatterlist status */
fae3c158 804 req->td->dmacount = cpu_to_le32(req->req.length - tmp);
ae8e530a
RRD
805 writel(BIT(DMA_DONE_INTERRUPT_ENABLE) | tmp,
806 &dma->dmacount);
1da177e4
LT
807 req->td->dmadesc = 0;
808 req->valid = 1;
809
3e76fdcb
RRD
810 writel(BIT(DMA_ENABLE), &dma->dmactl);
811 writel(BIT(DMA_START), &dma->dmastat);
1da177e4
LT
812 return;
813 }
814 }
815
816 tmp = dmactl_default;
817
818 /* force packet boundaries between dma requests, but prevent the
819 * controller from automagically writing a last "short" packet
820 * (zero length) unless the driver explicitly said to do that.
821 */
822 if (ep->is_in) {
fae3c158
RRD
823 if (likely((req->req.length % ep->ep.maxpacket) ||
824 req->req.zero)){
3e76fdcb 825 tmp |= BIT(DMA_FIFO_VALIDATE);
1da177e4
LT
826 ep->in_fifo_validate = 1;
827 } else
828 ep->in_fifo_validate = 0;
829 }
830
831 /* init req->td, pointing to the current dummy */
832 req->td->dmadesc = cpu_to_le32 (ep->td_dma);
fae3c158 833 fill_dma_desc(ep, req, 1);
1da177e4 834
90664198 835 req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN));
1da177e4 836
fae3c158 837 start_queue(ep, tmp, req->td_dma);
1da177e4
LT
838}
839
840static inline void
fae3c158 841queue_dma(struct net2280_ep *ep, struct net2280_request *req, int valid)
1da177e4
LT
842{
843 struct net2280_dma *end;
844 dma_addr_t tmp;
845
846 /* swap new dummy for old, link; fill and maybe activate */
847 end = ep->dummy;
848 ep->dummy = req->td;
849 req->td = end;
850
851 tmp = ep->td_dma;
852 ep->td_dma = req->td_dma;
853 req->td_dma = tmp;
854
855 end->dmadesc = cpu_to_le32 (ep->td_dma);
856
fae3c158 857 fill_dma_desc(ep, req, valid);
1da177e4
LT
858}
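/* Invariant kept by the swap above: each endpoint always owns one
 * spare "dummy" descriptor at the tail of its chain.  queue_dma()
 * makes the request's td the new dummy, links the old tail to it, and
 * only then fills in (and possibly validates) the old dummy for this
 * request, so an engine polling VALID_BIT never walks off the end of
 * the list.
 */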
859
860static void
fae3c158 861done(struct net2280_ep *ep, struct net2280_request *req, int status)
1da177e4
LT
862{
863 struct net2280 *dev;
864 unsigned stopped = ep->stopped;
865
fae3c158 866 list_del_init(&req->queue);
1da177e4
LT
867
868 if (req->req.status == -EINPROGRESS)
869 req->req.status = status;
870 else
871 status = req->req.status;
872
873 dev = ep->dev;
ae4d7933
FB
874 if (ep->dma)
875 usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);
1da177e4
LT
876
877 if (status && status != -ESHUTDOWN)
e56e69cc 878 ep_vdbg(dev, "complete %s req %p stat %d len %u/%u\n",
1da177e4
LT
879 ep->ep.name, &req->req, status,
880 req->req.actual, req->req.length);
881
882 /* don't modify queue heads during completion callback */
883 ep->stopped = 1;
fae3c158 884 spin_unlock(&dev->lock);
304f7e5e 885 usb_gadget_giveback_request(&ep->ep, &req->req);
fae3c158 886 spin_lock(&dev->lock);
1da177e4
LT
887 ep->stopped = stopped;
888}
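/* Note that done() drops dev->lock around usb_gadget_giveback_request()
 * so the gadget driver's completion callback may queue or dequeue
 * requests without deadlocking; ep->stopped is set across the callback
 * so such re-entrant calls don't advance this queue head.
 */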
889
890/*-------------------------------------------------------------------------*/
891
892static int
fae3c158 893net2280_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
1da177e4
LT
894{
895 struct net2280_request *req;
896 struct net2280_ep *ep;
897 struct net2280 *dev;
898 unsigned long flags;
899
900 /* we always require a cpu-view buffer, so that we can
901 * always use pio (as fallback or whatever).
902 */
fae3c158
RRD
903 req = container_of(_req, struct net2280_request, req);
904 if (!_req || !_req->complete || !_req->buf ||
905 !list_empty(&req->queue))
1da177e4
LT
906 return -EINVAL;
907 if (_req->length > (~0 & DMA_BYTE_COUNT_MASK))
908 return -EDOM;
fae3c158 909 ep = container_of(_ep, struct net2280_ep, ep);
1da177e4
LT
910 if (!_ep || (!ep->desc && ep->num != 0))
911 return -EINVAL;
912 dev = ep->dev;
913 if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
914 return -ESHUTDOWN;
915
916 /* FIXME implement PIO fallback for ZLPs with DMA */
917 if (ep->dma && _req->length == 0)
918 return -EOPNOTSUPP;
919
920 /* set up dma mapping in case the caller didn't */
ae4d7933
FB
921 if (ep->dma) {
922 int ret;
923
924 ret = usb_gadget_map_request(&dev->gadget, _req,
925 ep->is_in);
926 if (ret)
927 return ret;
1da177e4
LT
928 }
929
930#if 0
e56e69cc 931 ep_vdbg(dev, "%s queue req %p, len %d buf %p\n",
1da177e4
LT
932 _ep->name, _req, _req->length, _req->buf);
933#endif
934
fae3c158 935 spin_lock_irqsave(&dev->lock, flags);
1da177e4
LT
936
937 _req->status = -EINPROGRESS;
938 _req->actual = 0;
939
940 /* kickstart this i/o queue? */
485f44d0
RRD
941 if (list_empty(&ep->queue) && !ep->stopped &&
942 !((dev->quirks & PLX_SUPERSPEED) && ep->dma &&
943 (readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)))) {
944
1da177e4 945 /* use DMA if the endpoint supports it, else pio */
485f44d0 946 if (ep->dma)
fae3c158 947 start_dma(ep, req);
1da177e4
LT
948 else {
949 /* maybe there's no control data, just status ack */
950 if (ep->num == 0 && _req->length == 0) {
fae3c158
RRD
951 allow_status(ep);
952 done(ep, req, 0);
e56e69cc 953 ep_vdbg(dev, "%s status ack\n", ep->ep.name);
1da177e4
LT
954 goto done;
955 }
956
957 /* PIO ... stuff the fifo, or unblock it. */
958 if (ep->is_in)
fae3c158
RRD
959 write_fifo(ep, _req);
960 else if (list_empty(&ep->queue)) {
1da177e4
LT
961 u32 s;
962
963 /* OUT FIFO might have packet(s) buffered */
fae3c158 964 s = readl(&ep->regs->ep_stat);
3e76fdcb 965 if ((s & BIT(FIFO_EMPTY)) == 0) {
1da177e4
LT
966 /* note: _req->short_not_ok is
967 * ignored here since PIO _always_
968 * stops queue advance here, and
969 * _req->status doesn't change for
970 * short reads (only _req->actual)
971 */
fae3c158
RRD
972 if (read_fifo(ep, req) &&
973 ep->num == 0) {
974 done(ep, req, 0);
975 allow_status(ep);
1da177e4
LT
976 /* don't queue it */
977 req = NULL;
fae3c158
RRD
978 } else if (read_fifo(ep, req) &&
979 ep->num != 0) {
980 done(ep, req, 0);
981 req = NULL;
1da177e4 982 } else
fae3c158 983 s = readl(&ep->regs->ep_stat);
1da177e4
LT
984 }
985
986 /* don't NAK, let the fifo fill */
3e76fdcb
RRD
987 if (req && (s & BIT(NAK_OUT_PACKETS)))
988 writel(BIT(CLEAR_NAK_OUT_PACKETS),
1da177e4
LT
989 &ep->regs->ep_rsp);
990 }
991 }
992
993 } else if (ep->dma) {
994 int valid = 1;
995
996 if (ep->is_in) {
997 int expect;
998
999 /* preventing magic zlps is per-engine state, not
1000 * per-transfer; irq logic must recover hiccups.
1001 */
fae3c158
RRD
1002 expect = likely(req->req.zero ||
1003 (req->req.length % ep->ep.maxpacket));
1da177e4
LT
1004 if (expect != ep->in_fifo_validate)
1005 valid = 0;
1006 }
fae3c158 1007 queue_dma(ep, req, valid);
1da177e4
LT
1008
1009 } /* else the irq handler advances the queue. */
1010
1f26e28d 1011 ep->responded = 1;
1da177e4 1012 if (req)
fae3c158 1013 list_add_tail(&req->queue, &ep->queue);
1da177e4 1014done:
fae3c158 1015 spin_unlock_irqrestore(&dev->lock, flags);
1da177e4
LT
1016
1017 /* pci writes may still be posted */
1018 return 0;
1019}
1020
1021static inline void
fae3c158
RRD
1022dma_done(struct net2280_ep *ep, struct net2280_request *req, u32 dmacount,
1023 int status)
1da177e4
LT
1024{
1025 req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount);
fae3c158 1026 done(ep, req, status);
1da177e4
LT
1027}
1028
fae3c158 1029static void scan_dma_completions(struct net2280_ep *ep)
1da177e4
LT
1030{
1031 /* only look at descriptors that were "naturally" retired,
1032 * so fifo and list head state won't matter
1033 */
fae3c158 1034 while (!list_empty(&ep->queue)) {
1da177e4
LT
1035 struct net2280_request *req;
1036 u32 tmp;
1037
fae3c158 1038 req = list_entry(ep->queue.next,
1da177e4
LT
1039 struct net2280_request, queue);
1040 if (!req->valid)
1041 break;
fae3c158
RRD
1042 rmb();
1043 tmp = le32_to_cpup(&req->td->dmacount);
3e76fdcb 1044 if ((tmp & BIT(VALID_BIT)) != 0)
1da177e4
LT
1045 break;
1046
1047 /* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
1048 * cases where DMA must be aborted; this code handles
1049 * all non-abort DMA completions.
1050 */
fae3c158 1051 if (unlikely(req->td->dmadesc == 0)) {
1da177e4 1052 /* paranoia */
fae3c158 1053 tmp = readl(&ep->dma->dmacount);
1da177e4
LT
1054 if (tmp & DMA_BYTE_COUNT_MASK)
1055 break;
1056 /* single transfer mode */
fae3c158 1057 dma_done(ep, req, tmp, 0);
1da177e4 1058 break;
ae8e530a 1059 } else if (!ep->is_in &&
43780aaa
RRD
1060 (req->req.length % ep->ep.maxpacket) &&
1061 !(ep->dev->quirks & PLX_SUPERSPEED)) {
1da177e4 1062
18a4e65f 1063 tmp = readl(&ep->regs->ep_stat);
1da177e4
LT
1064 /* AVOID TROUBLE HERE by not issuing short reads from
 1065 * your gadget driver. That helps avoid errata 0121,
1066 * 0122, and 0124; not all cases trigger the warning.
1067 */
3e76fdcb 1068 if ((tmp & BIT(NAK_OUT_PACKETS)) == 0) {
e56e69cc 1069 ep_warn(ep->dev, "%s lost packet sync!\n",
1da177e4
LT
1070 ep->ep.name);
1071 req->req.status = -EOVERFLOW;
fae3c158
RRD
1072 } else {
1073 tmp = readl(&ep->regs->ep_avail);
1074 if (tmp) {
1075 /* fifo gets flushed later */
1076 ep->out_overflow = 1;
e56e69cc 1077 ep_dbg(ep->dev,
fae3c158 1078 "%s dma, discard %d len %d\n",
1da177e4
LT
1079 ep->ep.name, tmp,
1080 req->req.length);
fae3c158
RRD
1081 req->req.status = -EOVERFLOW;
1082 }
1da177e4
LT
1083 }
1084 }
fae3c158 1085 dma_done(ep, req, tmp, 0);
1da177e4
LT
1086 }
1087}
1088
fae3c158 1089static void restart_dma(struct net2280_ep *ep)
1da177e4
LT
1090{
1091 struct net2280_request *req;
1da177e4
LT
1092
1093 if (ep->stopped)
1094 return;
fae3c158 1095 req = list_entry(ep->queue.next, struct net2280_request, queue);
1da177e4 1096
90664198 1097 start_dma(ep, req);
1da177e4
LT
1098}
1099
e721c457 1100static void abort_dma(struct net2280_ep *ep)
1da177e4
LT
1101{
1102 /* abort the current transfer */
fae3c158 1103 if (likely(!list_empty(&ep->queue))) {
1da177e4 1104 /* FIXME work around errata 0121, 0122, 0124 */
3e76fdcb 1105 writel(BIT(DMA_ABORT), &ep->dma->dmastat);
fae3c158 1106 spin_stop_dma(ep->dma);
1da177e4 1107 } else
fae3c158
RRD
1108 stop_dma(ep->dma);
1109 scan_dma_completions(ep);
1da177e4
LT
1110}
1111
1112/* dequeue ALL requests */
fae3c158 1113static void nuke(struct net2280_ep *ep)
1da177e4
LT
1114{
1115 struct net2280_request *req;
1116
1117 /* called with spinlock held */
1118 ep->stopped = 1;
1119 if (ep->dma)
fae3c158
RRD
1120 abort_dma(ep);
1121 while (!list_empty(&ep->queue)) {
1122 req = list_entry(ep->queue.next,
1da177e4
LT
1123 struct net2280_request,
1124 queue);
fae3c158 1125 done(ep, req, -ESHUTDOWN);
1da177e4
LT
1126 }
1127}
1128
1129/* dequeue JUST ONE request */
fae3c158 1130static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
1da177e4
LT
1131{
1132 struct net2280_ep *ep;
1133 struct net2280_request *req;
1134 unsigned long flags;
1135 u32 dmactl;
1136 int stopped;
1137
fae3c158 1138 ep = container_of(_ep, struct net2280_ep, ep);
1da177e4
LT
1139 if (!_ep || (!ep->desc && ep->num != 0) || !_req)
1140 return -EINVAL;
1141
fae3c158 1142 spin_lock_irqsave(&ep->dev->lock, flags);
1da177e4
LT
1143 stopped = ep->stopped;
1144
1145 /* quiesce dma while we patch the queue */
1146 dmactl = 0;
1147 ep->stopped = 1;
1148 if (ep->dma) {
fae3c158 1149 dmactl = readl(&ep->dma->dmactl);
1da177e4 1150 /* WARNING erratum 0127 may kick in ... */
fae3c158
RRD
1151 stop_dma(ep->dma);
1152 scan_dma_completions(ep);
1da177e4
LT
1153 }
1154
1155 /* make sure it's still queued on this endpoint */
fae3c158 1156 list_for_each_entry(req, &ep->queue, queue) {
1da177e4
LT
1157 if (&req->req == _req)
1158 break;
1159 }
1160 if (&req->req != _req) {
fae3c158 1161 spin_unlock_irqrestore(&ep->dev->lock, flags);
1da177e4
LT
1162 return -EINVAL;
1163 }
1164
1165 /* queue head may be partially complete. */
1166 if (ep->queue.next == &req->queue) {
1167 if (ep->dma) {
e56e69cc 1168 ep_dbg(ep->dev, "unlink (%s) dma\n", _ep->name);
1da177e4 1169 _req->status = -ECONNRESET;
fae3c158
RRD
1170 abort_dma(ep);
1171 if (likely(ep->queue.next == &req->queue)) {
1172 /* NOTE: misreports single-transfer mode*/
1da177e4 1173 req->td->dmacount = 0; /* invalidate */
fae3c158
RRD
1174 dma_done(ep, req,
1175 readl(&ep->dma->dmacount),
1da177e4
LT
1176 -ECONNRESET);
1177 }
1178 } else {
e56e69cc 1179 ep_dbg(ep->dev, "unlink (%s) pio\n", _ep->name);
fae3c158 1180 done(ep, req, -ECONNRESET);
1da177e4
LT
1181 }
1182 req = NULL;
1da177e4
LT
1183 }
1184
1185 if (req)
fae3c158 1186 done(ep, req, -ECONNRESET);
1da177e4
LT
1187 ep->stopped = stopped;
1188
1189 if (ep->dma) {
1190 /* turn off dma on inactive queues */
fae3c158
RRD
1191 if (list_empty(&ep->queue))
1192 stop_dma(ep->dma);
1da177e4
LT
1193 else if (!ep->stopped) {
1194 /* resume current request, or start new one */
1195 if (req)
fae3c158 1196 writel(dmactl, &ep->dma->dmactl);
1da177e4 1197 else
fae3c158 1198 start_dma(ep, list_entry(ep->queue.next,
1da177e4
LT
1199 struct net2280_request, queue));
1200 }
1201 }
1202
fae3c158 1203 spin_unlock_irqrestore(&ep->dev->lock, flags);
1da177e4
LT
1204 return 0;
1205}
1206
1207/*-------------------------------------------------------------------------*/
1208
fae3c158 1209static int net2280_fifo_status(struct usb_ep *_ep);
1da177e4
LT
1210
1211static int
8066134f 1212net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
1da177e4
LT
1213{
1214 struct net2280_ep *ep;
1215 unsigned long flags;
1216 int retval = 0;
1217
fae3c158 1218 ep = container_of(_ep, struct net2280_ep, ep);
1da177e4
LT
1219 if (!_ep || (!ep->desc && ep->num != 0))
1220 return -EINVAL;
1221 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1222 return -ESHUTDOWN;
1223 if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03)
1224 == USB_ENDPOINT_XFER_ISOC)
1225 return -EINVAL;
1226
fae3c158
RRD
1227 spin_lock_irqsave(&ep->dev->lock, flags);
1228 if (!list_empty(&ep->queue))
1da177e4 1229 retval = -EAGAIN;
fae3c158 1230 else if (ep->is_in && value && net2280_fifo_status(_ep) != 0)
1da177e4
LT
1231 retval = -EAGAIN;
1232 else {
e56e69cc 1233 ep_vdbg(ep->dev, "%s %s %s\n", _ep->name,
8066134f
AS
1234 value ? "set" : "clear",
1235 wedged ? "wedge" : "halt");
1da177e4
LT
1236 /* set/clear, then synch memory views with the device */
1237 if (value) {
1238 if (ep->num == 0)
1239 ep->dev->protocol_stall = 1;
1240 else
fae3c158 1241 set_halt(ep);
8066134f
AS
1242 if (wedged)
1243 ep->wedged = 1;
1244 } else {
fae3c158 1245 clear_halt(ep);
2eeb0016 1246 if (ep->dev->quirks & PLX_SUPERSPEED &&
adc82f77
RRD
1247 !list_empty(&ep->queue) && ep->td_dma)
1248 restart_dma(ep);
8066134f
AS
1249 ep->wedged = 0;
1250 }
fae3c158 1251 (void) readl(&ep->regs->ep_rsp);
1da177e4 1252 }
fae3c158 1253 spin_unlock_irqrestore(&ep->dev->lock, flags);
1da177e4
LT
1254
1255 return retval;
1256}
1257
fae3c158 1258static int net2280_set_halt(struct usb_ep *_ep, int value)
8066134f
AS
1259{
1260 return net2280_set_halt_and_wedge(_ep, value, 0);
1261}
1262
fae3c158 1263static int net2280_set_wedge(struct usb_ep *_ep)
8066134f
AS
1264{
1265 if (!_ep || _ep->name == ep0name)
1266 return -EINVAL;
1267 return net2280_set_halt_and_wedge(_ep, 1, 1);
1268}
1269
fae3c158 1270static int net2280_fifo_status(struct usb_ep *_ep)
1da177e4
LT
1271{
1272 struct net2280_ep *ep;
1273 u32 avail;
1274
fae3c158 1275 ep = container_of(_ep, struct net2280_ep, ep);
1da177e4
LT
1276 if (!_ep || (!ep->desc && ep->num != 0))
1277 return -ENODEV;
1278 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1279 return -ESHUTDOWN;
1280
3e76fdcb 1281 avail = readl(&ep->regs->ep_avail) & (BIT(12) - 1);
1da177e4
LT
1282 if (avail > ep->fifo_size)
1283 return -EOVERFLOW;
1284 if (ep->is_in)
1285 avail = ep->fifo_size - avail;
1286 return avail;
1287}
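/* The ep_avail register counts bytes present in the FIFO for OUT
 * endpoints but free space for IN endpoints, hence the
 * "fifo_size - avail" conversion above so callers of the gadget
 * fifo_status() API always get the number of bytes still sitting in
 * hardware.
 */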
1288
fae3c158 1289static void net2280_fifo_flush(struct usb_ep *_ep)
1da177e4
LT
1290{
1291 struct net2280_ep *ep;
1292
fae3c158 1293 ep = container_of(_ep, struct net2280_ep, ep);
1da177e4
LT
1294 if (!_ep || (!ep->desc && ep->num != 0))
1295 return;
1296 if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
1297 return;
1298
3e76fdcb 1299 writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
fae3c158 1300 (void) readl(&ep->regs->ep_rsp);
1da177e4
LT
1301}
1302
901b3d75 1303static const struct usb_ep_ops net2280_ep_ops = {
1da177e4
LT
1304 .enable = net2280_enable,
1305 .disable = net2280_disable,
1306
1307 .alloc_request = net2280_alloc_request,
1308 .free_request = net2280_free_request,
1309
1da177e4
LT
1310 .queue = net2280_queue,
1311 .dequeue = net2280_dequeue,
1312
1313 .set_halt = net2280_set_halt,
8066134f 1314 .set_wedge = net2280_set_wedge,
1da177e4
LT
1315 .fifo_status = net2280_fifo_status,
1316 .fifo_flush = net2280_fifo_flush,
1317};
1318
1319/*-------------------------------------------------------------------------*/
1320
fae3c158 1321static int net2280_get_frame(struct usb_gadget *_gadget)
1da177e4
LT
1322{
1323 struct net2280 *dev;
1324 unsigned long flags;
1325 u16 retval;
1326
1327 if (!_gadget)
1328 return -ENODEV;
fae3c158
RRD
1329 dev = container_of(_gadget, struct net2280, gadget);
1330 spin_lock_irqsave(&dev->lock, flags);
1331 retval = get_idx_reg(dev->regs, REG_FRAME) & 0x03ff;
1332 spin_unlock_irqrestore(&dev->lock, flags);
1da177e4
LT
1333 return retval;
1334}
1335
fae3c158 1336static int net2280_wakeup(struct usb_gadget *_gadget)
1da177e4
LT
1337{
1338 struct net2280 *dev;
1339 u32 tmp;
1340 unsigned long flags;
1341
1342 if (!_gadget)
1343 return 0;
fae3c158 1344 dev = container_of(_gadget, struct net2280, gadget);
1da177e4 1345
fae3c158
RRD
1346 spin_lock_irqsave(&dev->lock, flags);
1347 tmp = readl(&dev->usb->usbctl);
3e76fdcb
RRD
1348 if (tmp & BIT(DEVICE_REMOTE_WAKEUP_ENABLE))
1349 writel(BIT(GENERATE_RESUME), &dev->usb->usbstat);
fae3c158 1350 spin_unlock_irqrestore(&dev->lock, flags);
1da177e4
LT
1351
1352 /* pci writes may still be posted */
1353 return 0;
1354}
1355
fae3c158 1356static int net2280_set_selfpowered(struct usb_gadget *_gadget, int value)
1da177e4
LT
1357{
1358 struct net2280 *dev;
1359 u32 tmp;
1360 unsigned long flags;
1361
1362 if (!_gadget)
1363 return 0;
fae3c158 1364 dev = container_of(_gadget, struct net2280, gadget);
1da177e4 1365
fae3c158
RRD
1366 spin_lock_irqsave(&dev->lock, flags);
1367 tmp = readl(&dev->usb->usbctl);
adc82f77 1368 if (value) {
3e76fdcb 1369 tmp |= BIT(SELF_POWERED_STATUS);
adc82f77
RRD
1370 dev->selfpowered = 1;
1371 } else {
3e76fdcb 1372 tmp &= ~BIT(SELF_POWERED_STATUS);
adc82f77
RRD
1373 dev->selfpowered = 0;
1374 }
fae3c158
RRD
1375 writel(tmp, &dev->usb->usbctl);
1376 spin_unlock_irqrestore(&dev->lock, flags);
1da177e4
LT
1377
1378 return 0;
1379}
1380
1381static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
1382{
1383 struct net2280 *dev;
1384 u32 tmp;
1385 unsigned long flags;
1386
1387 if (!_gadget)
1388 return -ENODEV;
fae3c158 1389 dev = container_of(_gadget, struct net2280, gadget);
1da177e4 1390
fae3c158
RRD
1391 spin_lock_irqsave(&dev->lock, flags);
1392 tmp = readl(&dev->usb->usbctl);
1da177e4
LT
1393 dev->softconnect = (is_on != 0);
1394 if (is_on)
3e76fdcb 1395 tmp |= BIT(USB_DETECT_ENABLE);
1da177e4 1396 else
3e76fdcb 1397 tmp &= ~BIT(USB_DETECT_ENABLE);
fae3c158
RRD
1398 writel(tmp, &dev->usb->usbctl);
1399 spin_unlock_irqrestore(&dev->lock, flags);
1da177e4
LT
1400
1401 return 0;
1402}
1403
4cf5e00b
FB
1404static int net2280_start(struct usb_gadget *_gadget,
1405 struct usb_gadget_driver *driver);
22835b80 1406static int net2280_stop(struct usb_gadget *_gadget);
0f91349b 1407
1da177e4
LT
1408static const struct usb_gadget_ops net2280_ops = {
1409 .get_frame = net2280_get_frame,
1410 .wakeup = net2280_wakeup,
1411 .set_selfpowered = net2280_set_selfpowered,
1412 .pullup = net2280_pullup,
4cf5e00b
FB
1413 .udc_start = net2280_start,
1414 .udc_stop = net2280_stop,
1da177e4
LT
1415};
1416
1417/*-------------------------------------------------------------------------*/
1418
b99b406c 1419#ifdef CONFIG_USB_GADGET_DEBUG_FILES
1da177e4
LT
1420
1421/* FIXME move these into procfs, and use seq_file.
1422 * Sysfs _still_ doesn't behave for arbitrarily sized files,
1423 * and also doesn't help products using this with 2.4 kernels.
1424 */
1425
1426/* "function" sysfs attribute */
ce26bd23
GKH
1427static ssize_t function_show(struct device *_dev, struct device_attribute *attr,
1428 char *buf)
1da177e4 1429{
fae3c158 1430 struct net2280 *dev = dev_get_drvdata(_dev);
1da177e4 1431
fae3c158
RRD
1432 if (!dev->driver || !dev->driver->function ||
1433 strlen(dev->driver->function) > PAGE_SIZE)
1da177e4 1434 return 0;
fae3c158 1435 return scnprintf(buf, PAGE_SIZE, "%s\n", dev->driver->function);
1da177e4 1436}
ce26bd23 1437static DEVICE_ATTR_RO(function);
1da177e4 1438
ce26bd23
GKH
1439static ssize_t registers_show(struct device *_dev,
1440 struct device_attribute *attr, char *buf)
1da177e4
LT
1441{
1442 struct net2280 *dev;
1443 char *next;
1444 unsigned size, t;
1445 unsigned long flags;
1446 int i;
1447 u32 t1, t2;
30e69598 1448 const char *s;
1da177e4 1449
fae3c158 1450 dev = dev_get_drvdata(_dev);
1da177e4
LT
1451 next = buf;
1452 size = PAGE_SIZE;
fae3c158 1453 spin_lock_irqsave(&dev->lock, flags);
1da177e4
LT
1454
1455 if (dev->driver)
1456 s = dev->driver->driver.name;
1457 else
1458 s = "(none)";
1459
1460 /* Main Control Registers */
fae3c158 1461 t = scnprintf(next, size, "%s version " DRIVER_VERSION
d588ff58 1462 ", chiprev %04x\n\n"
1da177e4
LT
1463 "devinit %03x fifoctl %08x gadget '%s'\n"
1464 "pci irqenb0 %02x irqenb1 %08x "
1465 "irqstat0 %04x irqstat1 %08x\n",
1466 driver_name, dev->chiprev,
fae3c158
RRD
1467 readl(&dev->regs->devinit),
1468 readl(&dev->regs->fifoctl),
1da177e4 1469 s,
fae3c158
RRD
1470 readl(&dev->regs->pciirqenb0),
1471 readl(&dev->regs->pciirqenb1),
1472 readl(&dev->regs->irqstat0),
1473 readl(&dev->regs->irqstat1));
1da177e4
LT
1474 size -= t;
1475 next += t;
1476
1477 /* USB Control Registers */
fae3c158
RRD
1478 t1 = readl(&dev->usb->usbctl);
1479 t2 = readl(&dev->usb->usbstat);
3e76fdcb
RRD
1480 if (t1 & BIT(VBUS_PIN)) {
1481 if (t2 & BIT(HIGH_SPEED))
1da177e4
LT
1482 s = "high speed";
1483 else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1484 s = "powered";
1485 else
1486 s = "full speed";
1487 /* full speed bit (6) not working?? */
1488 } else
1489 s = "not attached";
fae3c158 1490 t = scnprintf(next, size,
1da177e4
LT
1491 "stdrsp %08x usbctl %08x usbstat %08x "
1492 "addr 0x%02x (%s)\n",
fae3c158
RRD
1493 readl(&dev->usb->stdrsp), t1, t2,
1494 readl(&dev->usb->ouraddr), s);
1da177e4
LT
1495 size -= t;
1496 next += t;
1497
1498 /* PCI Master Control Registers */
1499
1500 /* DMA Control Registers */
1501
1502 /* Configurable EP Control Registers */
adc82f77 1503 for (i = 0; i < dev->n_ep; i++) {
1da177e4
LT
1504 struct net2280_ep *ep;
1505
fae3c158 1506 ep = &dev->ep[i];
1da177e4
LT
1507 if (i && !ep->desc)
1508 continue;
1509
adc82f77 1510 t1 = readl(&ep->cfg->ep_cfg);
fae3c158
RRD
1511 t2 = readl(&ep->regs->ep_rsp) & 0xff;
1512 t = scnprintf(next, size,
1da177e4
LT
1513 "\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s"
1514 "irqenb %02x\n",
1515 ep->ep.name, t1, t2,
3e76fdcb 1516 (t2 & BIT(CLEAR_NAK_OUT_PACKETS))
1da177e4 1517 ? "NAK " : "",
3e76fdcb 1518 (t2 & BIT(CLEAR_EP_HIDE_STATUS_PHASE))
1da177e4 1519 ? "hide " : "",
3e76fdcb 1520 (t2 & BIT(CLEAR_EP_FORCE_CRC_ERROR))
1da177e4 1521 ? "CRC " : "",
3e76fdcb 1522 (t2 & BIT(CLEAR_INTERRUPT_MODE))
1da177e4 1523 ? "interrupt " : "",
3e76fdcb 1524 (t2 & BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE))
1da177e4 1525 ? "status " : "",
3e76fdcb 1526 (t2 & BIT(CLEAR_NAK_OUT_PACKETS_MODE))
1da177e4 1527 ? "NAKmode " : "",
3e76fdcb 1528 (t2 & BIT(CLEAR_ENDPOINT_TOGGLE))
1da177e4 1529 ? "DATA1 " : "DATA0 ",
3e76fdcb 1530 (t2 & BIT(CLEAR_ENDPOINT_HALT))
1da177e4 1531 ? "HALT " : "",
fae3c158 1532 readl(&ep->regs->ep_irqenb));
1da177e4
LT
1533 size -= t;
1534 next += t;
1535
fae3c158 1536 t = scnprintf(next, size,
1da177e4
LT
1537 "\tstat %08x avail %04x "
1538 "(ep%d%s-%s)%s\n",
fae3c158
RRD
1539 readl(&ep->regs->ep_stat),
1540 readl(&ep->regs->ep_avail),
1541 t1 & 0x0f, DIR_STRING(t1),
1542 type_string(t1 >> 8),
1da177e4
LT
1543 ep->stopped ? "*" : "");
1544 size -= t;
1545 next += t;
1546
1547 if (!ep->dma)
1548 continue;
1549
fae3c158 1550 t = scnprintf(next, size,
1da177e4
LT
1551 " dma\tctl %08x stat %08x count %08x\n"
1552 "\taddr %08x desc %08x\n",
fae3c158
RRD
1553 readl(&ep->dma->dmactl),
1554 readl(&ep->dma->dmastat),
1555 readl(&ep->dma->dmacount),
1556 readl(&ep->dma->dmaaddr),
1557 readl(&ep->dma->dmadesc));
1da177e4
LT
1558 size -= t;
1559 next += t;
1560
1561 }
1562
fae3c158 1563 /* Indexed Registers (none yet) */
1da177e4
LT
1564
1565 /* Statistics */
fae3c158 1566 t = scnprintf(next, size, "\nirqs: ");
1da177e4
LT
1567 size -= t;
1568 next += t;
adc82f77 1569 for (i = 0; i < dev->n_ep; i++) {
1da177e4
LT
1570 struct net2280_ep *ep;
1571
fae3c158 1572 ep = &dev->ep[i];
1da177e4
LT
1573 if (i && !ep->irqs)
1574 continue;
fae3c158 1575 t = scnprintf(next, size, " %s/%lu", ep->ep.name, ep->irqs);
1da177e4
LT
1576 size -= t;
1577 next += t;
1578
1579 }
fae3c158 1580 t = scnprintf(next, size, "\n");
1da177e4
LT
1581 size -= t;
1582 next += t;
1583
fae3c158 1584 spin_unlock_irqrestore(&dev->lock, flags);
1da177e4
LT
1585
1586 return PAGE_SIZE - size;
1587}
ce26bd23 1588static DEVICE_ATTR_RO(registers);
1da177e4 1589
ce26bd23
GKH
1590static ssize_t queues_show(struct device *_dev, struct device_attribute *attr,
1591 char *buf)
1da177e4
LT
1592{
1593 struct net2280 *dev;
1594 char *next;
1595 unsigned size;
1596 unsigned long flags;
1597 int i;
1598
fae3c158 1599 dev = dev_get_drvdata(_dev);
1da177e4
LT
1600 next = buf;
1601 size = PAGE_SIZE;
fae3c158 1602 spin_lock_irqsave(&dev->lock, flags);
1da177e4 1603
adc82f77 1604 for (i = 0; i < dev->n_ep; i++) {
fae3c158 1605 struct net2280_ep *ep = &dev->ep[i];
1da177e4
LT
1606 struct net2280_request *req;
1607 int t;
1608
1609 if (i != 0) {
1610 const struct usb_endpoint_descriptor *d;
1611
1612 d = ep->desc;
1613 if (!d)
1614 continue;
1615 t = d->bEndpointAddress;
fae3c158 1616 t = scnprintf(next, size,
1da177e4
LT
1617 "\n%s (ep%d%s-%s) max %04x %s fifo %d\n",
1618 ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
1619 (t & USB_DIR_IN) ? "in" : "out",
a27f37a1 1620 type_string(d->bmAttributes),
fae3c158 1621 usb_endpoint_maxp(d) & 0x1fff,
1da177e4
LT
1622 ep->dma ? "dma" : "pio", ep->fifo_size
1623 );
1624 } else /* ep0 should only have one transfer queued */
fae3c158 1625 t = scnprintf(next, size, "ep0 max 64 pio %s\n",
1da177e4
LT
1626 ep->is_in ? "in" : "out");
1627 if (t <= 0 || t > size)
1628 goto done;
1629 size -= t;
1630 next += t;
1631
fae3c158
RRD
1632 if (list_empty(&ep->queue)) {
1633 t = scnprintf(next, size, "\t(nothing queued)\n");
1da177e4
LT
1634 if (t <= 0 || t > size)
1635 goto done;
1636 size -= t;
1637 next += t;
1638 continue;
1639 }
fae3c158
RRD
1640 list_for_each_entry(req, &ep->queue, queue) {
1641 if (ep->dma && req->td_dma == readl(&ep->dma->dmadesc))
1642 t = scnprintf(next, size,
1da177e4
LT
1643 "\treq %p len %d/%d "
1644 "buf %p (dmacount %08x)\n",
1645 &req->req, req->req.actual,
1646 req->req.length, req->req.buf,
fae3c158 1647 readl(&ep->dma->dmacount));
1da177e4 1648 else
fae3c158 1649 t = scnprintf(next, size,
1da177e4
LT
1650 "\treq %p len %d/%d buf %p\n",
1651 &req->req, req->req.actual,
1652 req->req.length, req->req.buf);
1653 if (t <= 0 || t > size)
1654 goto done;
1655 size -= t;
1656 next += t;
1657
1658 if (ep->dma) {
1659 struct net2280_dma *td;
1660
1661 td = req->td;
fae3c158 1662 t = scnprintf(next, size, "\t td %08x "
1da177e4
LT
1663 " count %08x buf %08x desc %08x\n",
1664 (u32) req->td_dma,
fae3c158
RRD
1665 le32_to_cpu(td->dmacount),
1666 le32_to_cpu(td->dmaaddr),
1667 le32_to_cpu(td->dmadesc));
1da177e4
LT
1668 if (t <= 0 || t > size)
1669 goto done;
1670 size -= t;
1671 next += t;
1672 }
1673 }
1674 }
1675
1676done:
fae3c158 1677 spin_unlock_irqrestore(&dev->lock, flags);
1da177e4
LT
1678 return PAGE_SIZE - size;
1679}
ce26bd23 1680static DEVICE_ATTR_RO(queues);
1da177e4
LT
1681
1682
1683#else
1684
fae3c158
RRD
1685#define device_create_file(a, b) (0)
1686#define device_remove_file(a, b) do { } while (0)
1da177e4
LT
1687
1688#endif
1689
1690/*-------------------------------------------------------------------------*/
1691
1692/* another driver-specific mode might be a request type doing dma
1693 * to/from another device fifo instead of to/from memory.
1694 */
1695
fae3c158 1696static void set_fifo_mode(struct net2280 *dev, int mode)
1da177e4
LT
1697{
1698 /* keeping high bits preserves BAR2 */
fae3c158 1699 writel((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl);
1da177e4
LT
1700
1701 /* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */
fae3c158
RRD
1702 INIT_LIST_HEAD(&dev->gadget.ep_list);
1703 list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
1704 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1da177e4
LT
1705 switch (mode) {
1706 case 0:
fae3c158
RRD
1707 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1708 list_add_tail(&dev->ep[4].ep.ep_list, &dev->gadget.ep_list);
1709 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
1da177e4
LT
1710 break;
1711 case 1:
fae3c158 1712 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 2048;
1da177e4
LT
1713 break;
1714 case 2:
fae3c158
RRD
1715 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1716 dev->ep[1].fifo_size = 2048;
1717 dev->ep[2].fifo_size = 1024;
1da177e4
LT
1718 break;
1719 }
1720 /* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */
fae3c158
RRD
1721 list_add_tail(&dev->ep[5].ep.ep_list, &dev->gadget.ep_list);
1722 list_add_tail(&dev->ep[6].ep.ep_list, &dev->gadget.ep_list);
1da177e4
LT
1723}
1724
adc82f77
RRD
1725static void defect7374_disable_data_eps(struct net2280 *dev)
1726{
1727 /*
1728 * For Defect 7374, disable data EPs (and more):
1729 * - This phase undoes the earlier phase of the Defect 7374 workaround,
1730 * returning ep regs back to normal.
1731 */
1732 struct net2280_ep *ep;
1733 int i;
1734 unsigned char ep_sel;
1735 u32 tmp_reg;
1736
1737 for (i = 1; i < 5; i++) {
1738 ep = &dev->ep[i];
1739 writel(0, &ep->cfg->ep_cfg);
1740 }
1741
1742 /* CSROUT, CSRIN, PCIOUT, PCIIN, STATIN, RCIN */
1743 for (i = 0; i < 6; i++)
1744 writel(0, &dev->dep[i].dep_cfg);
1745
1746 for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
1747 /* Select an endpoint for subsequent operations: */
1748 tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
1749 writel(((tmp_reg & ~0x1f) | ep_sel), &dev->plregs->pl_ep_ctrl);
1750
1751 if (ep_sel < 2 || (ep_sel > 9 && ep_sel < 14) ||
1752 ep_sel == 18 || ep_sel == 20)
1753 continue;
1754
1755 /* Change settings on some selected endpoints */
1756 tmp_reg = readl(&dev->plregs->pl_ep_cfg_4);
3e76fdcb 1757 tmp_reg &= ~BIT(NON_CTRL_IN_TOLERATE_BAD_DIR);
adc82f77
RRD
1758 writel(tmp_reg, &dev->plregs->pl_ep_cfg_4);
1759 tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
3e76fdcb 1760 tmp_reg |= BIT(EP_INITIALIZED);
adc82f77
RRD
1761 writel(tmp_reg, &dev->plregs->pl_ep_ctrl);
1762 }
1763}
1764
1765static void defect7374_enable_data_eps_zero(struct net2280 *dev)
1766{
1767 u32 tmp = 0, tmp_reg;
5517525e 1768 u32 scratch;
adc82f77
RRD
1769 int i;
1770 unsigned char ep_sel;
1771
1772 scratch = get_idx_reg(dev->regs, SCRATCH);
5517525e
RRD
1773
1774 WARN_ON((scratch & (0xf << DEFECT7374_FSM_FIELD))
1775 == DEFECT7374_FSM_SS_CONTROL_READ);
1776
adc82f77
RRD
1777 scratch &= ~(0xf << DEFECT7374_FSM_FIELD);
1778
5517525e
RRD
1779 ep_warn(dev, "Applying Defect 7374 workaround (soft) this time");
1780 ep_warn(dev, "It will run again on cold reboot and SS connect");
adc82f77 1781
5517525e
RRD
1782 /*GPEPs:*/
1783 tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_DIRECTION) |
1784 (2 << OUT_ENDPOINT_TYPE) | (2 << IN_ENDPOINT_TYPE) |
1785 ((dev->enhanced_mode) ?
1786 BIT(OUT_ENDPOINT_ENABLE) : BIT(ENDPOINT_ENABLE)) |
1787 BIT(IN_ENDPOINT_ENABLE));
adc82f77 1788
5517525e
RRD
1789 for (i = 1; i < 5; i++)
1790 writel(tmp, &dev->ep[i].cfg->ep_cfg);
adc82f77 1791
5517525e
RRD
1792 /* CSRIN, PCIIN, STATIN, RCIN*/
1793 tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_ENABLE));
1794 writel(tmp, &dev->dep[1].dep_cfg);
1795 writel(tmp, &dev->dep[3].dep_cfg);
1796 writel(tmp, &dev->dep[4].dep_cfg);
1797 writel(tmp, &dev->dep[5].dep_cfg);
adc82f77 1798
5517525e
RRD
1799 /* Implemented for development and debug.
1800 * Can be refined/tuned later. */
1801 for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
1802 /* Select an endpoint for subsequent operations: */
1803 tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
1804 writel(((tmp_reg & ~0x1f) | ep_sel),
1805 &dev->plregs->pl_ep_ctrl);
1806
1807 if (ep_sel == 1) {
1808 tmp =
1809 (readl(&dev->plregs->pl_ep_ctrl) |
1810 BIT(CLEAR_ACK_ERROR_CODE) | 0);
1811 writel(tmp, &dev->plregs->pl_ep_ctrl);
1812 continue;
adc82f77
RRD
1813 }
1814
5517525e
RRD
1815 if (ep_sel == 0 || (ep_sel > 9 && ep_sel < 14) ||
1816 ep_sel == 18 || ep_sel == 20)
1817 continue;
1818
1819 tmp = (readl(&dev->plregs->pl_ep_cfg_4) |
1820 BIT(NON_CTRL_IN_TOLERATE_BAD_DIR) | 0);
1821 writel(tmp, &dev->plregs->pl_ep_cfg_4);
1822
1823 tmp = readl(&dev->plregs->pl_ep_ctrl) &
1824 ~BIT(EP_INITIALIZED);
1825 writel(tmp, &dev->plregs->pl_ep_ctrl);
adc82f77 1826
adc82f77 1827 }
5517525e
RRD
1828
1829 /* Set FSM to focus on the first Control Read:
1830 * - Tip: Connection speed is known upon the first
1831 * setup request.*/
1832 scratch |= DEFECT7374_FSM_WAITING_FOR_CONTROL_READ;
1833 set_idx_reg(dev->regs, SCRATCH, scratch);
1834
adc82f77
RRD
1835}
1836
1da177e4
LT
1837/* keeping it simple:
1838 * - one bus driver, initted first;
1839 * - one function driver, initted second
1840 *
1841 * most of the work to support multiple net2280 controllers would
1842 * be to associate this gadget driver (yes?) with all of them, or
1843 * perhaps to bind specific drivers to specific devices.
1844 */
1845
adc82f77 1846static void usb_reset_228x(struct net2280 *dev)
1da177e4
LT
1847{
1848 u32 tmp;
1849
1850 dev->gadget.speed = USB_SPEED_UNKNOWN;
fae3c158 1851 (void) readl(&dev->usb->usbctl);
1da177e4 1852
fae3c158 1853 net2280_led_init(dev);
1da177e4
LT
1854
1855 /* disable automatic responses, and irqs */
fae3c158
RRD
1856 writel(0, &dev->usb->stdrsp);
1857 writel(0, &dev->regs->pciirqenb0);
1858 writel(0, &dev->regs->pciirqenb1);
1da177e4
LT
1859
1860 /* clear old dma and irq state */
1861 for (tmp = 0; tmp < 4; tmp++) {
adc82f77 1862 struct net2280_ep *ep = &dev->ep[tmp + 1];
1da177e4 1863 if (ep->dma)
adc82f77 1864 abort_dma(ep);
1da177e4 1865 }
adc82f77 1866
fae3c158 1867 writel(~0, &dev->regs->irqstat0),
3e76fdcb 1868 writel(~(u32)BIT(SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1),
1da177e4
LT
1869
1870 /* reset, and enable pci */
3e76fdcb
RRD
1871 tmp = readl(&dev->regs->devinit) |
1872 BIT(PCI_ENABLE) |
1873 BIT(FIFO_SOFT_RESET) |
1874 BIT(USB_SOFT_RESET) |
1875 BIT(M8051_RESET);
fae3c158 1876 writel(tmp, &dev->regs->devinit);
1da177e4
LT
1877
1878 /* standard fifo and endpoint allocations */
fae3c158 1879 set_fifo_mode(dev, (fifo_mode <= 2) ? fifo_mode : 0);
1da177e4
LT
1880}
1881
adc82f77
RRD
1882static void usb_reset_338x(struct net2280 *dev)
1883{
1884 u32 tmp;
adc82f77
RRD
1885
1886 dev->gadget.speed = USB_SPEED_UNKNOWN;
1887 (void)readl(&dev->usb->usbctl);
1888
1889 net2280_led_init(dev);
1890
5517525e 1891 if (dev->bug7734_patched) {
adc82f77
RRD
1892 /* disable automatic responses, and irqs */
1893 writel(0, &dev->usb->stdrsp);
1894 writel(0, &dev->regs->pciirqenb0);
1895 writel(0, &dev->regs->pciirqenb1);
1896 }
1897
1898 /* clear old dma and irq state */
1899 for (tmp = 0; tmp < 4; tmp++) {
1900 struct net2280_ep *ep = &dev->ep[tmp + 1];
1901
1902 if (ep->dma)
1903 abort_dma(ep);
1904 }
1905
1906 writel(~0, &dev->regs->irqstat0), writel(~0, &dev->regs->irqstat1);
1907
5517525e 1908 if (dev->bug7734_patched) {
adc82f77
RRD
1909 /* reset, and enable pci */
1910 tmp = readl(&dev->regs->devinit) |
3e76fdcb
RRD
1911 BIT(PCI_ENABLE) |
1912 BIT(FIFO_SOFT_RESET) |
1913 BIT(USB_SOFT_RESET) |
1914 BIT(M8051_RESET);
adc82f77
RRD
1915
1916 writel(tmp, &dev->regs->devinit);
1917 }
1918
1919 /* always ep-{1,2,3,4} ... maybe not ep-3 or ep-4 */
1920 INIT_LIST_HEAD(&dev->gadget.ep_list);
1921
1922 for (tmp = 1; tmp < dev->n_ep; tmp++)
1923 list_add_tail(&dev->ep[tmp].ep.ep_list, &dev->gadget.ep_list);
1924
1925}
1926
1927static void usb_reset(struct net2280 *dev)
1928{
2eeb0016 1929 if (dev->quirks & PLX_LEGACY)
adc82f77
RRD
1930 return usb_reset_228x(dev);
1931 return usb_reset_338x(dev);
1932}
1933
1934static void usb_reinit_228x(struct net2280 *dev)
1da177e4
LT
1935{
1936 u32 tmp;
1da177e4
LT
1937
1938 /* basic endpoint init */
1939 for (tmp = 0; tmp < 7; tmp++) {
fae3c158 1940 struct net2280_ep *ep = &dev->ep[tmp];
1da177e4 1941
fae3c158 1942 ep->ep.name = ep_name[tmp];
1da177e4
LT
1943 ep->dev = dev;
1944 ep->num = tmp;
1945
1946 if (tmp > 0 && tmp <= 4) {
1947 ep->fifo_size = 1024;
d588ff58 1948 ep->dma = &dev->dma[tmp - 1];
1da177e4
LT
1949 } else
1950 ep->fifo_size = 64;
fae3c158 1951 ep->regs = &dev->epregs[tmp];
adc82f77
RRD
1952 ep->cfg = &dev->epregs[tmp];
1953 ep_reset_228x(dev->regs, ep);
1da177e4 1954 }
fae3c158
RRD
1955 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
1956 usb_ep_set_maxpacket_limit(&dev->ep[5].ep, 64);
1957 usb_ep_set_maxpacket_limit(&dev->ep[6].ep, 64);
1da177e4 1958
fae3c158
RRD
1959 dev->gadget.ep0 = &dev->ep[0].ep;
1960 dev->ep[0].stopped = 0;
1961 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1da177e4
LT
1962
1963 /* we want to prevent lowlevel/insecure access from the USB host,
1964 * but erratum 0119 means this enable bit is ignored
1965 */
1966 for (tmp = 0; tmp < 5; tmp++)
fae3c158 1967 writel(EP_DONTUSE, &dev->dep[tmp].dep_cfg);
1da177e4
LT
1968}
1969
adc82f77
RRD
1970static void usb_reinit_338x(struct net2280 *dev)
1971{
adc82f77
RRD
1972 int i;
1973 u32 tmp, val;
adc82f77
RRD
1974 static const u32 ne[9] = { 0, 1, 2, 3, 4, 1, 2, 3, 4 };
1975 static const u32 ep_reg_addr[9] = { 0x00, 0xC0, 0x00, 0xC0, 0x00,
1976 0x00, 0xC0, 0x00, 0xC0 };
1977
adc82f77
RRD
1978 /* basic endpoint init */
1979 for (i = 0; i < dev->n_ep; i++) {
1980 struct net2280_ep *ep = &dev->ep[i];
1981
1982 ep->ep.name = ep_name[i];
1983 ep->dev = dev;
1984 ep->num = i;
1985
d588ff58 1986 if (i > 0 && i <= 4)
adc82f77
RRD
1987 ep->dma = &dev->dma[i - 1];
1988
1989 if (dev->enhanced_mode) {
1990 ep->cfg = &dev->epregs[ne[i]];
1991 ep->regs = (struct net2280_ep_regs __iomem *)
c43e97b2 1992 (((void __iomem *)&dev->epregs[ne[i]]) +
adc82f77
RRD
1993 ep_reg_addr[i]);
1994 ep->fiforegs = &dev->fiforegs[i];
1995 } else {
1996 ep->cfg = &dev->epregs[i];
1997 ep->regs = &dev->epregs[i];
1998 ep->fiforegs = &dev->fiforegs[i];
1999 }
2000
2001 ep->fifo_size = (i != 0) ? 2048 : 512;
2002
2003 ep_reset_338x(dev->regs, ep);
2004 }
2005 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 512);
2006
2007 dev->gadget.ep0 = &dev->ep[0].ep;
2008 dev->ep[0].stopped = 0;
2009
2010 /* Link layer set up */
5517525e 2011 if (dev->bug7734_patched) {
adc82f77 2012 tmp = readl(&dev->usb_ext->usbctl2) &
3e76fdcb 2013 ~(BIT(U1_ENABLE) | BIT(U2_ENABLE) | BIT(LTM_ENABLE));
adc82f77
RRD
2014 writel(tmp, &dev->usb_ext->usbctl2);
2015 }
2016
2017 /* Hardware Defect and Workaround */
2018 val = readl(&dev->ll_lfps_regs->ll_lfps_5);
2019 val &= ~(0xf << TIMER_LFPS_6US);
2020 val |= 0x5 << TIMER_LFPS_6US;
2021 writel(val, &dev->ll_lfps_regs->ll_lfps_5);
2022
2023 val = readl(&dev->ll_lfps_regs->ll_lfps_6);
2024 val &= ~(0xffff << TIMER_LFPS_80US);
2025 val |= 0x0100 << TIMER_LFPS_80US;
2026 writel(val, &dev->ll_lfps_regs->ll_lfps_6);
2027
2028 /*
2029 * AA_AB Errata. Issue 4. Workaround for SuperSpeed USB
2030 * Hot Reset Exit Handshake may Fail in Specific Case using
2031 * Default Register Settings. Workaround for Enumeration test.
2032 */
2033 val = readl(&dev->ll_tsn_regs->ll_tsn_counters_2);
2034 val &= ~(0x1f << HOT_TX_NORESET_TS2);
2035 val |= 0x10 << HOT_TX_NORESET_TS2;
2036 writel(val, &dev->ll_tsn_regs->ll_tsn_counters_2);
2037
2038 val = readl(&dev->ll_tsn_regs->ll_tsn_counters_3);
2039 val &= ~(0x1f << HOT_RX_RESET_TS2);
2040 val |= 0x3 << HOT_RX_RESET_TS2;
2041 writel(val, &dev->ll_tsn_regs->ll_tsn_counters_3);
2042
2043 /*
2044 * Set Recovery Idle to Recover bit:
2045 * - On SS connections, setting Recovery Idle to Recover Fmw improves
2046 * link robustness with various hosts and hubs.
2047 * - It is safe to set for all connection speeds; all chip revisions.
2048 * - R-M-W to leave other bits undisturbed.
2049 * - Reference PLX TT-7372
2050 */
2051 val = readl(&dev->ll_chicken_reg->ll_tsn_chicken_bit);
3e76fdcb 2052 val |= BIT(RECOVERY_IDLE_TO_RECOVER_FMW);
adc82f77
RRD
2053 writel(val, &dev->ll_chicken_reg->ll_tsn_chicken_bit);
2054
2055 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
2056
2057 /* disable dedicated endpoints */
2058 writel(0x0D, &dev->dep[0].dep_cfg);
2059 writel(0x0D, &dev->dep[1].dep_cfg);
2060 writel(0x0E, &dev->dep[2].dep_cfg);
2061 writel(0x0E, &dev->dep[3].dep_cfg);
2062 writel(0x0F, &dev->dep[4].dep_cfg);
2063 writel(0x0C, &dev->dep[5].dep_cfg);
2064}
2065
2066static void usb_reinit(struct net2280 *dev)
2067{
2eeb0016 2068 if (dev->quirks & PLX_LEGACY)
adc82f77
RRD
2069 return usb_reinit_228x(dev);
2070 return usb_reinit_338x(dev);
2071}
2072
2073static void ep0_start_228x(struct net2280 *dev)
1da177e4 2074{
3e76fdcb
RRD
2075 writel(BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
2076 BIT(CLEAR_NAK_OUT_PACKETS) |
ae8e530a
RRD
2077 BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE),
2078 &dev->epregs[0].ep_rsp);
1da177e4
LT
2079
2080 /*
2081 * hardware optionally handles a bunch of standard requests
2082 * that the API hides from drivers anyway. have it do so.
2083 * endpoint status/features are handled in software, to
2084 * help pass tests for some dubious behavior.
2085 */
3e76fdcb
RRD
2086 writel(BIT(SET_TEST_MODE) |
2087 BIT(SET_ADDRESS) |
2088 BIT(DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP) |
2089 BIT(GET_DEVICE_STATUS) |
ae8e530a
RRD
2090 BIT(GET_INTERFACE_STATUS),
2091 &dev->usb->stdrsp);
3e76fdcb
RRD
2092 writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
2093 BIT(SELF_POWERED_USB_DEVICE) |
2094 BIT(REMOTE_WAKEUP_SUPPORT) |
2095 (dev->softconnect << USB_DETECT_ENABLE) |
2096 BIT(SELF_POWERED_STATUS),
2097 &dev->usb->usbctl);
1da177e4
LT
2098
2099 /* enable irqs so we can see ep0 and general operation */
3e76fdcb
RRD
2100 writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
2101 BIT(ENDPOINT_0_INTERRUPT_ENABLE),
2102 &dev->regs->pciirqenb0);
2103 writel(BIT(PCI_INTERRUPT_ENABLE) |
2104 BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE) |
2105 BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE) |
2106 BIT(PCI_RETRY_ABORT_INTERRUPT_ENABLE) |
2107 BIT(VBUS_INTERRUPT_ENABLE) |
2108 BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
2109 BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE),
2110 &dev->regs->pciirqenb1);
1da177e4
LT
2111
2112 /* don't leave any writes posted */
fae3c158 2113 (void) readl(&dev->usb->usbctl);
1da177e4
LT
2114}
2115
adc82f77
RRD
2116static void ep0_start_338x(struct net2280 *dev)
2117{
adc82f77 2118
5517525e 2119 if (dev->bug7734_patched)
3e76fdcb
RRD
2120 writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
2121 BIT(SET_EP_HIDE_STATUS_PHASE),
adc82f77
RRD
2122 &dev->epregs[0].ep_rsp);
2123
2124 /*
2125 * hardware optionally handles a bunch of standard requests
2126 * that the API hides from drivers anyway. have it do so.
2127 * endpoint status/features are handled in software, to
2128 * help pass tests for some dubious behavior.
2129 */
3e76fdcb
RRD
2130 writel(BIT(SET_ISOCHRONOUS_DELAY) |
2131 BIT(SET_SEL) |
2132 BIT(SET_TEST_MODE) |
2133 BIT(SET_ADDRESS) |
2134 BIT(GET_INTERFACE_STATUS) |
2135 BIT(GET_DEVICE_STATUS),
adc82f77
RRD
2136 &dev->usb->stdrsp);
2137 dev->wakeup_enable = 1;
3e76fdcb 2138 writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
adc82f77 2139 (dev->softconnect << USB_DETECT_ENABLE) |
3e76fdcb 2140 BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
adc82f77
RRD
2141 &dev->usb->usbctl);
2142
2143 /* enable irqs so we can see ep0 and general operation */
3e76fdcb 2144 writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
ae8e530a
RRD
2145 BIT(ENDPOINT_0_INTERRUPT_ENABLE),
2146 &dev->regs->pciirqenb0);
3e76fdcb
RRD
2147 writel(BIT(PCI_INTERRUPT_ENABLE) |
2148 BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
2149 BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE) |
2150 BIT(VBUS_INTERRUPT_ENABLE),
adc82f77
RRD
2151 &dev->regs->pciirqenb1);
2152
2153 /* don't leave any writes posted */
2154 (void)readl(&dev->usb->usbctl);
2155}
2156
2157static void ep0_start(struct net2280 *dev)
2158{
2eeb0016 2159 if (dev->quirks & PLX_LEGACY)
adc82f77
RRD
2160 return ep0_start_228x(dev);
2161 return ep0_start_338x(dev);
2162}
2163
1da177e4
LT
2164/* when a driver is successfully registered, it will receive
2165 * control requests including set_configuration(), which enables
2166 * non-control requests. then usb traffic follows until a
2167 * disconnect is reported. then a host may connect again, or
2168 * the driver might get unbound.
2169 */
4cf5e00b
FB
2170static int net2280_start(struct usb_gadget *_gadget,
2171 struct usb_gadget_driver *driver)
1da177e4 2172{
4cf5e00b 2173 struct net2280 *dev;
1da177e4
LT
2174 int retval;
2175 unsigned i;
2176
2177 /* insist on high speed support from the driver, since
2178 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
2179 * "must not be used in normal operation"
2180 */
ae8e530a
RRD
2181 if (!driver || driver->max_speed < USB_SPEED_HIGH ||
2182 !driver->setup)
1da177e4 2183 return -EINVAL;
4cf5e00b 2184
fae3c158 2185 dev = container_of(_gadget, struct net2280, gadget);
1da177e4 2186
adc82f77 2187 for (i = 0; i < dev->n_ep; i++)
fae3c158 2188 dev->ep[i].irqs = 0;
1da177e4
LT
2189
2190 /* hook up the driver ... */
2191 dev->softconnect = 1;
2192 driver->driver.bus = NULL;
2193 dev->driver = driver;
1da177e4 2194
fae3c158
RRD
2195 retval = device_create_file(&dev->pdev->dev, &dev_attr_function);
2196 if (retval)
2197 goto err_unbind;
2198 retval = device_create_file(&dev->pdev->dev, &dev_attr_queues);
2199 if (retval)
2200 goto err_func;
1da177e4 2201
7a74c481 2202 /* enable host detection and ep0; and we're ready
1da177e4
LT
2203 * for set_configuration as well as eventual disconnect.
2204 */
fae3c158 2205 net2280_led_active(dev, 1);
adc82f77 2206
5517525e 2207 if ((dev->quirks & PLX_SUPERSPEED) && !dev->bug7734_patched)
adc82f77
RRD
2208 defect7374_enable_data_eps_zero(dev);
2209
fae3c158 2210 ep0_start(dev);
1da177e4 2211
1da177e4
LT
2212 /* pci writes may still be posted */
2213 return 0;
b3899dac
JG
2214
2215err_func:
fae3c158 2216 device_remove_file(&dev->pdev->dev, &dev_attr_function);
b3899dac 2217err_unbind:
b3899dac
JG
2218 dev->driver = NULL;
2219 return retval;
1da177e4 2220}
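Illustrative sketch (not part of the driver source): the other half of the lifecycle described above is the gadget driver itself. net2280_start() rejects drivers whose max_speed is below USB_SPEED_HIGH or which lack a setup() callback, so a minimal driver looks roughly like the following; all names are hypothetical, and registration is assumed to go through the gadget core's usb_gadget_probe_driver():

    #include <linux/module.h>
    #include <linux/usb/gadget.h>

    static int example_bind(struct usb_gadget *gadget,
                            struct usb_gadget_driver *driver)
    {
            /* claim endpoints, allocate usb_requests, etc. */
            return 0;
    }

    static void example_unbind(struct usb_gadget *gadget)
    {
    }

    static int example_setup(struct usb_gadget *gadget,
                             const struct usb_ctrlrequest *ctrl)
    {
            /* class/vendor control requests arrive here; the UDC has already
             * answered most standard requests (see ep0_start() above)
             */
            return -EOPNOTSUPP;
    }

    static void example_disconnect(struct usb_gadget *gadget)
    {
    }

    static struct usb_gadget_driver example_driver = {
            .function       = "example",
            .max_speed      = USB_SPEED_HIGH, /* net2280_start() insists on at least high speed */
            .bind           = example_bind,
            .unbind         = example_unbind,
            .setup          = example_setup,
            .disconnect     = example_disconnect,
            .driver         = {
                    .name   = "example_gadget",
                    .owner  = THIS_MODULE,
            },
    };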
1da177e4 2221
fae3c158 2222static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
1da177e4
LT
2223{
2224 int i;
2225
2226 /* don't disconnect if it's not connected */
2227 if (dev->gadget.speed == USB_SPEED_UNKNOWN)
2228 driver = NULL;
2229
2230 /* stop hardware; prevent new request submissions;
2231 * and kill any outstanding requests.
2232 */
fae3c158 2233 usb_reset(dev);
adc82f77 2234 for (i = 0; i < dev->n_ep; i++)
fae3c158 2235 nuke(&dev->ep[i]);
1da177e4 2236
699412d9
FB
2237 /* report disconnect; the driver is already quiesced */
2238 if (driver) {
2239 spin_unlock(&dev->lock);
2240 driver->disconnect(&dev->gadget);
2241 spin_lock(&dev->lock);
2242 }
2243
fae3c158 2244 usb_reinit(dev);
1da177e4
LT
2245}
2246
22835b80 2247static int net2280_stop(struct usb_gadget *_gadget)
1da177e4 2248{
4cf5e00b 2249 struct net2280 *dev;
1da177e4
LT
2250 unsigned long flags;
2251
fae3c158 2252 dev = container_of(_gadget, struct net2280, gadget);
1da177e4 2253
fae3c158 2254 spin_lock_irqsave(&dev->lock, flags);
bfd0ed57 2255 stop_activity(dev, NULL);
fae3c158 2256 spin_unlock_irqrestore(&dev->lock, flags);
1da177e4 2257
fae3c158 2258 net2280_led_active(dev, 0);
2f076077 2259
fae3c158
RRD
2260 device_remove_file(&dev->pdev->dev, &dev_attr_function);
2261 device_remove_file(&dev->pdev->dev, &dev_attr_queues);
1da177e4 2262
bfd0ed57 2263 dev->driver = NULL;
84237bfb 2264
1da177e4
LT
2265 return 0;
2266}
1da177e4
LT
2267
2268/*-------------------------------------------------------------------------*/
2269
2270/* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
2271 * also works for dma-capable endpoints, in pio mode or just
2272 * to manually advance the queue after short OUT transfers.
2273 */
fae3c158 2274static void handle_ep_small(struct net2280_ep *ep)
1da177e4
LT
2275{
2276 struct net2280_request *req;
2277 u32 t;
2278 /* 0 error, 1 mid-data, 2 done */
2279 int mode = 1;
2280
fae3c158
RRD
2281 if (!list_empty(&ep->queue))
2282 req = list_entry(ep->queue.next,
1da177e4
LT
2283 struct net2280_request, queue);
2284 else
2285 req = NULL;
2286
2287 /* ack all, and handle what we care about */
fae3c158 2288 t = readl(&ep->regs->ep_stat);
1da177e4
LT
2289 ep->irqs++;
2290#if 0
e56e69cc 2291 ep_vdbg(ep->dev, "%s ack ep_stat %08x, req %p\n",
1da177e4
LT
2292 ep->ep.name, t, req ? &req->req : 0);
2293#endif
2eeb0016 2294 if (!ep->is_in || (ep->dev->quirks & PLX_2280))
3e76fdcb 2295 writel(t & ~BIT(NAK_OUT_PACKETS), &ep->regs->ep_stat);
950ee4c8
GL
2296 else
2297 /* Added for 2282 */
fae3c158 2298 writel(t, &ep->regs->ep_stat);
1da177e4
LT
2299
2300 /* for ep0, monitor token irqs to catch data stage length errors
2301 * and to synchronize on status.
2302 *
2303 * also, to defer reporting of protocol stalls ... here's where
2304 * data or status first appears, handling stalls here should never
2305 * cause trouble on the host side..
2306 *
2307 * control requests could be slightly faster without token synch for
2308 * status, but status can jam up that way.
2309 */
fae3c158 2310 if (unlikely(ep->num == 0)) {
1da177e4
LT
2311 if (ep->is_in) {
2312 /* status; stop NAKing */
3e76fdcb 2313 if (t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) {
1da177e4
LT
2314 if (ep->dev->protocol_stall) {
2315 ep->stopped = 1;
fae3c158 2316 set_halt(ep);
1da177e4
LT
2317 }
2318 if (!req)
fae3c158 2319 allow_status(ep);
1da177e4
LT
2320 mode = 2;
2321 /* reply to extra IN data tokens with a zlp */
3e76fdcb 2322 } else if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
1da177e4
LT
2323 if (ep->dev->protocol_stall) {
2324 ep->stopped = 1;
fae3c158 2325 set_halt(ep);
1da177e4 2326 mode = 2;
1f26e28d
AS
2327 } else if (ep->responded &&
2328 !req && !ep->stopped)
fae3c158 2329 write_fifo(ep, NULL);
1da177e4
LT
2330 }
2331 } else {
2332 /* status; stop NAKing */
3e76fdcb 2333 if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
1da177e4
LT
2334 if (ep->dev->protocol_stall) {
2335 ep->stopped = 1;
fae3c158 2336 set_halt(ep);
1da177e4
LT
2337 }
2338 mode = 2;
2339 /* an extra OUT token is an error */
ae8e530a
RRD
2340 } else if (((t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) &&
2341 req &&
2342 req->req.actual == req->req.length) ||
2343 (ep->responded && !req)) {
1da177e4 2344 ep->dev->protocol_stall = 1;
fae3c158 2345 set_halt(ep);
1da177e4
LT
2346 ep->stopped = 1;
2347 if (req)
fae3c158 2348 done(ep, req, -EOVERFLOW);
1da177e4
LT
2349 req = NULL;
2350 }
2351 }
2352 }
2353
fae3c158 2354 if (unlikely(!req))
1da177e4
LT
2355 return;
2356
2357 /* manual DMA queue advance after short OUT */
fae3c158 2358 if (likely(ep->dma)) {
3e76fdcb 2359 if (t & BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
1da177e4
LT
2360 u32 count;
2361 int stopped = ep->stopped;
2362
2363 /* TRANSFERRED works around OUT_DONE erratum 0112.
2364 * we expect (N <= maxpacket) bytes; host wrote M.
2365 * iff (M < N) we won't ever see a DMA interrupt.
2366 */
2367 ep->stopped = 1;
fae3c158 2368 for (count = 0; ; t = readl(&ep->regs->ep_stat)) {
1da177e4
LT
2369
2370 /* any preceding dma transfers must finish.
2371 * dma handles (M >= N), may empty the queue
2372 */
fae3c158 2373 scan_dma_completions(ep);
ae8e530a
RRD
2374 if (unlikely(list_empty(&ep->queue) ||
2375 ep->out_overflow)) {
1da177e4
LT
2376 req = NULL;
2377 break;
2378 }
fae3c158 2379 req = list_entry(ep->queue.next,
1da177e4
LT
2380 struct net2280_request, queue);
2381
2382 /* here either (M < N), a "real" short rx;
2383 * or (M == N) and the queue didn't empty
2384 */
3e76fdcb 2385 if (likely(t & BIT(FIFO_EMPTY))) {
fae3c158 2386 count = readl(&ep->dma->dmacount);
1da177e4 2387 count &= DMA_BYTE_COUNT_MASK;
fae3c158 2388 if (readl(&ep->dma->dmadesc)
1da177e4
LT
2389 != req->td_dma)
2390 req = NULL;
2391 break;
2392 }
2393 udelay(1);
2394 }
2395
2396 /* stop DMA, leave ep NAKing */
3e76fdcb 2397 writel(BIT(DMA_ABORT), &ep->dma->dmastat);
fae3c158 2398 spin_stop_dma(ep->dma);
1da177e4 2399
fae3c158 2400 if (likely(req)) {
1da177e4 2401 req->td->dmacount = 0;
fae3c158
RRD
2402 t = readl(&ep->regs->ep_avail);
2403 dma_done(ep, req, count,
901b3d75
DB
2404 (ep->out_overflow || t)
2405 ? -EOVERFLOW : 0);
1da177e4
LT
2406 }
2407
2408 /* also flush to prevent erratum 0106 trouble */
ae8e530a
RRD
2409 if (unlikely(ep->out_overflow ||
2410 (ep->dev->chiprev == 0x0100 &&
2411 ep->dev->gadget.speed
2412 == USB_SPEED_FULL))) {
fae3c158 2413 out_flush(ep);
1da177e4
LT
2414 ep->out_overflow = 0;
2415 }
2416
2417 /* (re)start dma if needed, stop NAKing */
2418 ep->stopped = stopped;
fae3c158
RRD
2419 if (!list_empty(&ep->queue))
2420 restart_dma(ep);
1da177e4 2421 } else
e56e69cc 2422 ep_dbg(ep->dev, "%s dma ep_stat %08x ??\n",
1da177e4
LT
2423 ep->ep.name, t);
2424 return;
2425
2426 /* data packet(s) received (in the fifo, OUT) */
3e76fdcb 2427 } else if (t & BIT(DATA_PACKET_RECEIVED_INTERRUPT)) {
fae3c158 2428 if (read_fifo(ep, req) && ep->num != 0)
1da177e4
LT
2429 mode = 2;
2430
2431 /* data packet(s) transmitted (IN) */
3e76fdcb 2432 } else if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT)) {
1da177e4
LT
2433 unsigned len;
2434
2435 len = req->req.length - req->req.actual;
2436 if (len > ep->ep.maxpacket)
2437 len = ep->ep.maxpacket;
2438 req->req.actual += len;
2439
2440 /* if we wrote it all, we're usually done */
fae3c158
RRD
2441 /* send zlps until the status stage */
2442 if ((req->req.actual == req->req.length) &&
2443 (!req->req.zero || len != ep->ep.maxpacket) && ep->num)
1da177e4 2444 mode = 2;
1da177e4
LT
2445
2446 /* there was nothing to do ... */
2447 } else if (mode == 1)
2448 return;
2449
2450 /* done */
2451 if (mode == 2) {
2452 /* stream endpoints often resubmit/unlink in completion */
fae3c158 2453 done(ep, req, 0);
1da177e4
LT
2454
2455 /* maybe advance queue to next request */
2456 if (ep->num == 0) {
2457 /* NOTE: net2280 could let gadget driver start the
2458 * status stage later. since not all controllers let
2459 * them control that, the api doesn't (yet) allow it.
2460 */
2461 if (!ep->stopped)
fae3c158 2462 allow_status(ep);
1da177e4
LT
2463 req = NULL;
2464 } else {
fae3c158
RRD
2465 if (!list_empty(&ep->queue) && !ep->stopped)
2466 req = list_entry(ep->queue.next,
1da177e4
LT
2467 struct net2280_request, queue);
2468 else
2469 req = NULL;
2470 if (req && !ep->is_in)
fae3c158 2471 stop_out_naking(ep);
1da177e4
LT
2472 }
2473 }
2474
2475 /* is there a buffer for the next packet?
2476 * for best streaming performance, make sure there is one.
2477 */
2478 if (req && !ep->stopped) {
2479
2480 /* load IN fifo with next packet (may be zlp) */
3e76fdcb 2481 if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT))
fae3c158 2482 write_fifo(ep, &req->req);
1da177e4
LT
2483 }
2484}
2485
fae3c158 2486static struct net2280_ep *get_ep_by_addr(struct net2280 *dev, u16 wIndex)
1da177e4
LT
2487{
2488 struct net2280_ep *ep;
2489
2490 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
fae3c158
RRD
2491 return &dev->ep[0];
2492 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
1da177e4
LT
2493 u8 bEndpointAddress;
2494
2495 if (!ep->desc)
2496 continue;
2497 bEndpointAddress = ep->desc->bEndpointAddress;
2498 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
2499 continue;
2500 if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
2501 return ep;
2502 }
2503 return NULL;
2504}
2505
adc82f77
RRD
2506static void defect7374_workaround(struct net2280 *dev, struct usb_ctrlrequest r)
2507{
2508 u32 scratch, fsmvalue;
2509 u32 ack_wait_timeout, state;
2510
2511 /* Workaround for Defect 7374 (U1/U2 erroneously rejected): */
2512 scratch = get_idx_reg(dev->regs, SCRATCH);
2513 fsmvalue = scratch & (0xf << DEFECT7374_FSM_FIELD);
2514 scratch &= ~(0xf << DEFECT7374_FSM_FIELD);
2515
2516 if (!((fsmvalue == DEFECT7374_FSM_WAITING_FOR_CONTROL_READ) &&
2517 (r.bRequestType & USB_DIR_IN)))
2518 return;
2519
2520 /* This is the first Control Read for this connection: */
3e76fdcb 2521 if (!(readl(&dev->usb->usbstat) & BIT(SUPER_SPEED_MODE))) {
adc82f77
RRD
2522 /*
2523 * Connection is NOT SS:
2524 * - Connection must be FS or HS.
2525 * - This FSM state should allow workaround software to
2526 * run after the next USB connection.
2527 */
2528 scratch |= DEFECT7374_FSM_NON_SS_CONTROL_READ;
5517525e 2529 dev->bug7734_patched = 1;
adc82f77
RRD
2530 goto restore_data_eps;
2531 }
2532
2533 /* Connection is SS: */
2534 for (ack_wait_timeout = 0;
2535 ack_wait_timeout < DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS;
2536 ack_wait_timeout++) {
2537
2538 state = readl(&dev->plregs->pl_ep_status_1)
2539 & (0xff << STATE);
2540 if ((state >= (ACK_GOOD_NORMAL << STATE)) &&
2541 (state <= (ACK_GOOD_MORE_ACKS_TO_COME << STATE))) {
2542 scratch |= DEFECT7374_FSM_SS_CONTROL_READ;
5517525e 2543 dev->bug7734_patched = 1;
adc82f77
RRD
2544 break;
2545 }
2546
2547 /*
2548 * We have not yet received host's Data Phase ACK
2549 * - Wait and try again.
2550 */
2551 udelay(DEFECT_7374_PROCESSOR_WAIT_TIME);
2552
2553 continue;
2554 }
2555
2556
2557 if (ack_wait_timeout >= DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS) {
e56e69cc 2558 ep_err(dev, "FAIL: Defect 7374 workaround waited but failed "
adc82f77 2559 "to detect SS host's data phase ACK.");
e56e69cc 2560 ep_err(dev, "PL_EP_STATUS_1(23:16): Expected from 0x11 to 0x16, "
adc82f77
RRD
2561 "got 0x%2.2x.\n", state >> STATE);
2562 } else {
e56e69cc 2563 ep_warn(dev, "INFO: Defect 7374 workaround waited about\n"
adc82f77
RRD
2564 "%d uSec for Control Read Data Phase ACK\n",
2565 DEFECT_7374_PROCESSOR_WAIT_TIME * ack_wait_timeout);
2566 }
2567
2568restore_data_eps:
2569 /*
2570 * Restore data EPs to their pre-workaround settings (disabled,
2571 * initialized, and other details).
2572 */
2573 defect7374_disable_data_eps(dev);
2574
2575 set_idx_reg(dev->regs, SCRATCH, scratch);
2576
2577 return;
2578}
2579
e0cbb046 2580static void ep_clear_seqnum(struct net2280_ep *ep)
adc82f77
RRD
2581{
2582 struct net2280 *dev = ep->dev;
2583 u32 val;
2584 static const u32 ep_pl[9] = { 0, 3, 4, 7, 8, 2, 5, 6, 9 };
2585
e0cbb046
RRD
2586 val = readl(&dev->plregs->pl_ep_ctrl) & ~0x1f;
2587 val |= ep_pl[ep->num];
2588 writel(val, &dev->plregs->pl_ep_ctrl);
2589 val |= BIT(SEQUENCE_NUMBER_RESET);
2590 writel(val, &dev->plregs->pl_ep_ctrl);
adc82f77 2591
e0cbb046 2592 return;
adc82f77
RRD
2593}
2594
adc82f77
RRD
2595static void handle_stat0_irqs_superspeed(struct net2280 *dev,
2596 struct net2280_ep *ep, struct usb_ctrlrequest r)
2597{
2598 int tmp = 0;
2599
2600#define w_value le16_to_cpu(r.wValue)
2601#define w_index le16_to_cpu(r.wIndex)
2602#define w_length le16_to_cpu(r.wLength)
2603
2604 switch (r.bRequest) {
2605 struct net2280_ep *e;
2606 u16 status;
2607
2608 case USB_REQ_SET_CONFIGURATION:
2609 dev->addressed_state = !w_value;
2610 goto usb3_delegate;
2611
2612 case USB_REQ_GET_STATUS:
2613 switch (r.bRequestType) {
2614 case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
2615 status = dev->wakeup_enable ? 0x02 : 0x00;
2616 if (dev->selfpowered)
3e76fdcb 2617 status |= BIT(0);
adc82f77
RRD
2618 status |= (dev->u1_enable << 2 | dev->u2_enable << 3 |
2619 dev->ltm_enable << 4);
2620 writel(0, &dev->epregs[0].ep_irqenb);
2621 set_fifo_bytecount(ep, sizeof(status));
2622 writel((__force u32) status, &dev->epregs[0].ep_data);
2623 allow_status_338x(ep);
2624 break;
2625
2626 case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
2627 e = get_ep_by_addr(dev, w_index);
2628 if (!e)
2629 goto do_stall3;
2630 status = readl(&e->regs->ep_rsp) &
3e76fdcb 2631 BIT(CLEAR_ENDPOINT_HALT);
adc82f77
RRD
2632 writel(0, &dev->epregs[0].ep_irqenb);
2633 set_fifo_bytecount(ep, sizeof(status));
2634 writel((__force u32) status, &dev->epregs[0].ep_data);
2635 allow_status_338x(ep);
2636 break;
2637
2638 default:
2639 goto usb3_delegate;
2640 }
2641 break;
2642
2643 case USB_REQ_CLEAR_FEATURE:
2644 switch (r.bRequestType) {
2645 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
2646 if (!dev->addressed_state) {
2647 switch (w_value) {
2648 case USB_DEVICE_U1_ENABLE:
2649 dev->u1_enable = 0;
2650 writel(readl(&dev->usb_ext->usbctl2) &
3e76fdcb 2651 ~BIT(U1_ENABLE),
adc82f77
RRD
2652 &dev->usb_ext->usbctl2);
2653 allow_status_338x(ep);
2654 goto next_endpoints3;
2655
2656 case USB_DEVICE_U2_ENABLE:
2657 dev->u2_enable = 0;
2658 writel(readl(&dev->usb_ext->usbctl2) &
3e76fdcb 2659 ~BIT(U2_ENABLE),
adc82f77
RRD
2660 &dev->usb_ext->usbctl2);
2661 allow_status_338x(ep);
2662 goto next_endpoints3;
2663
2664 case USB_DEVICE_LTM_ENABLE:
2665 dev->ltm_enable = 0;
2666 writel(readl(&dev->usb_ext->usbctl2) &
3e76fdcb 2667 ~BIT(LTM_ENABLE),
adc82f77
RRD
2668 &dev->usb_ext->usbctl2);
2669 allow_status_338x(ep);
2670 goto next_endpoints3;
2671
2672 default:
2673 break;
2674 }
2675 }
2676 if (w_value == USB_DEVICE_REMOTE_WAKEUP) {
2677 dev->wakeup_enable = 0;
2678 writel(readl(&dev->usb->usbctl) &
3e76fdcb 2679 ~BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
adc82f77
RRD
2680 &dev->usb->usbctl);
2681 allow_status_338x(ep);
2682 break;
2683 }
2684 goto usb3_delegate;
2685
2686 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
2687 e = get_ep_by_addr(dev, w_index);
2688 if (!e)
2689 goto do_stall3;
2690 if (w_value != USB_ENDPOINT_HALT)
2691 goto do_stall3;
e56e69cc 2692 ep_vdbg(dev, "%s clear halt\n", e->ep.name);
e0cbb046
RRD
2693 /*
2694 * Workaround for SS SeqNum not cleared via
2695 * Endpoint Halt (Clear) bit. select endpoint
2696 */
2697 ep_clear_seqnum(e);
2698 clear_halt(e);
adc82f77
RRD
2699 if (!list_empty(&e->queue) && e->td_dma)
2700 restart_dma(e);
2701 allow_status(ep);
2702 ep->stopped = 1;
2703 break;
2704
2705 default:
2706 goto usb3_delegate;
2707 }
2708 break;
2709 case USB_REQ_SET_FEATURE:
2710 switch (r.bRequestType) {
2711 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
2712 if (!dev->addressed_state) {
2713 switch (w_value) {
2714 case USB_DEVICE_U1_ENABLE:
2715 dev->u1_enable = 1;
2716 writel(readl(&dev->usb_ext->usbctl2) |
3e76fdcb 2717 BIT(U1_ENABLE),
adc82f77
RRD
2718 &dev->usb_ext->usbctl2);
2719 allow_status_338x(ep);
2720 goto next_endpoints3;
2721
2722 case USB_DEVICE_U2_ENABLE:
2723 dev->u2_enable = 1;
2724 writel(readl(&dev->usb_ext->usbctl2) |
3e76fdcb 2725 BIT(U2_ENABLE),
adc82f77
RRD
2726 &dev->usb_ext->usbctl2);
2727 allow_status_338x(ep);
2728 goto next_endpoints3;
2729
2730 case USB_DEVICE_LTM_ENABLE:
2731 dev->ltm_enable = 1;
2732 writel(readl(&dev->usb_ext->usbctl2) |
3e76fdcb 2733 BIT(LTM_ENABLE),
adc82f77
RRD
2734 &dev->usb_ext->usbctl2);
2735 allow_status_338x(ep);
2736 goto next_endpoints3;
2737 default:
2738 break;
2739 }
2740 }
2741
2742 if (w_value == USB_DEVICE_REMOTE_WAKEUP) {
2743 dev->wakeup_enable = 1;
2744 writel(readl(&dev->usb->usbctl) |
3e76fdcb 2745 BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
adc82f77
RRD
2746 &dev->usb->usbctl);
2747 allow_status_338x(ep);
2748 break;
2749 }
2750 goto usb3_delegate;
2751
2752 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
2753 e = get_ep_by_addr(dev, w_index);
2754 if (!e || (w_value != USB_ENDPOINT_HALT))
2755 goto do_stall3;
cf8b1cde
RRD
2756 ep->stopped = 1;
2757 if (ep->num == 0)
2758 ep->dev->protocol_stall = 1;
2759 else {
2760 if (ep->dma)
e721c457 2761 abort_dma(ep);
e0cbb046 2762 set_halt(ep);
cf8b1cde 2763 }
adc82f77
RRD
2764 allow_status_338x(ep);
2765 break;
2766
2767 default:
2768 goto usb3_delegate;
2769 }
2770
2771 break;
2772 default:
2773
2774usb3_delegate:
e56e69cc 2775 ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x ep_cfg %08x\n",
adc82f77
RRD
2776 r.bRequestType, r.bRequest,
2777 w_value, w_index, w_length,
2778 readl(&ep->cfg->ep_cfg));
2779
2780 ep->responded = 0;
2781 spin_unlock(&dev->lock);
2782 tmp = dev->driver->setup(&dev->gadget, &r);
2783 spin_lock(&dev->lock);
2784 }
2785do_stall3:
2786 if (tmp < 0) {
e56e69cc 2787 ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n",
adc82f77
RRD
2788 r.bRequestType, r.bRequest, tmp);
2789 dev->protocol_stall = 1;
2790 /* TD 9.9 Halt Endpoint test. TD 9.22 Set feature test */
e0cbb046 2791 set_halt(ep);
adc82f77
RRD
2792 }
2793
2794next_endpoints3:
2795
2796#undef w_value
2797#undef w_index
2798#undef w_length
2799
2800 return;
2801}
2802
fae3c158 2803static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
1da177e4
LT
2804{
2805 struct net2280_ep *ep;
2806 u32 num, scratch;
2807
2808 /* most of these don't need individual acks */
3e76fdcb 2809 stat &= ~BIT(INTA_ASSERTED);
1da177e4
LT
2810 if (!stat)
2811 return;
e56e69cc 2812 /* ep_dbg(dev, "irqstat0 %04x\n", stat); */
1da177e4
LT
2813
2814 /* starting a control request? */
3e76fdcb 2815 if (unlikely(stat & BIT(SETUP_PACKET_INTERRUPT))) {
1da177e4 2816 union {
fae3c158 2817 u32 raw[2];
1da177e4
LT
2818 struct usb_ctrlrequest r;
2819 } u;
950ee4c8 2820 int tmp;
1da177e4
LT
2821 struct net2280_request *req;
2822
2823 if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
adc82f77 2824 u32 val = readl(&dev->usb->usbstat);
3e76fdcb 2825 if (val & BIT(SUPER_SPEED)) {
adc82f77
RRD
2826 dev->gadget.speed = USB_SPEED_SUPER;
2827 usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
2828 EP0_SS_MAX_PACKET_SIZE);
3e76fdcb 2829 } else if (val & BIT(HIGH_SPEED)) {
1da177e4 2830 dev->gadget.speed = USB_SPEED_HIGH;
adc82f77
RRD
2831 usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
2832 EP0_HS_MAX_PACKET_SIZE);
2833 } else {
1da177e4 2834 dev->gadget.speed = USB_SPEED_FULL;
adc82f77
RRD
2835 usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
2836 EP0_HS_MAX_PACKET_SIZE);
2837 }
fae3c158 2838 net2280_led_speed(dev, dev->gadget.speed);
e56e69cc 2839 ep_dbg(dev, "%s\n",
fae3c158 2840 usb_speed_string(dev->gadget.speed));
1da177e4
LT
2841 }
2842
fae3c158 2843 ep = &dev->ep[0];
1da177e4
LT
2844 ep->irqs++;
2845
2846 /* make sure any leftover request state is cleared */
3e76fdcb 2847 stat &= ~BIT(ENDPOINT_0_INTERRUPT);
fae3c158
RRD
2848 while (!list_empty(&ep->queue)) {
2849 req = list_entry(ep->queue.next,
1da177e4 2850 struct net2280_request, queue);
fae3c158 2851 done(ep, req, (req->req.actual == req->req.length)
1da177e4
LT
2852 ? 0 : -EPROTO);
2853 }
2854 ep->stopped = 0;
2855 dev->protocol_stall = 0;
5d1b6840 2856 if (!(dev->quirks & PLX_SUPERSPEED)) {
2eeb0016 2857 if (ep->dev->quirks & PLX_2280)
3e76fdcb
RRD
2858 tmp = BIT(FIFO_OVERFLOW) |
2859 BIT(FIFO_UNDERFLOW);
adc82f77
RRD
2860 else
2861 tmp = 0;
2862
3e76fdcb
RRD
2863 writel(tmp | BIT(TIMEOUT) |
2864 BIT(USB_STALL_SENT) |
2865 BIT(USB_IN_NAK_SENT) |
2866 BIT(USB_IN_ACK_RCVD) |
2867 BIT(USB_OUT_PING_NAK_SENT) |
2868 BIT(USB_OUT_ACK_SENT) |
2869 BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
2870 BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
2871 BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
2872 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
2873 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
ae8e530a
RRD
2874 BIT(DATA_IN_TOKEN_INTERRUPT),
2875 &ep->regs->ep_stat);
adc82f77
RRD
2876 }
2877 u.raw[0] = readl(&dev->usb->setup0123);
2878 u.raw[1] = readl(&dev->usb->setup4567);
901b3d75 2879
fae3c158
RRD
2880 cpu_to_le32s(&u.raw[0]);
2881 cpu_to_le32s(&u.raw[1]);
1da177e4 2882
5517525e 2883 if ((dev->quirks & PLX_SUPERSPEED) && !dev->bug7734_patched)
adc82f77
RRD
2884 defect7374_workaround(dev, u.r);
2885
950ee4c8
GL
2886 tmp = 0;
2887
01ee7d70
DB
2888#define w_value le16_to_cpu(u.r.wValue)
2889#define w_index le16_to_cpu(u.r.wIndex)
2890#define w_length le16_to_cpu(u.r.wLength)
1da177e4
LT
2891
2892 /* ack the irq */
3e76fdcb
RRD
2893 writel(BIT(SETUP_PACKET_INTERRUPT), &dev->regs->irqstat0);
2894 stat ^= BIT(SETUP_PACKET_INTERRUPT);
1da177e4
LT
2895
2896 /* watch control traffic at the token level, and force
2897 * synchronization before letting the status stage happen.
2898 * FIXME ignore tokens we'll NAK, until driver responds.
2899 * that'll mean a lot less irqs for some drivers.
2900 */
2901 ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
2902 if (ep->is_in) {
3e76fdcb
RRD
2903 scratch = BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
2904 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
2905 BIT(DATA_IN_TOKEN_INTERRUPT);
fae3c158 2906 stop_out_naking(ep);
1da177e4 2907 } else
3e76fdcb
RRD
2908 scratch = BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
2909 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
2910 BIT(DATA_IN_TOKEN_INTERRUPT);
fae3c158 2911 writel(scratch, &dev->epregs[0].ep_irqenb);
1da177e4
LT
2912
2913 /* we made the hardware handle most lowlevel requests;
2914 * everything else goes uplevel to the gadget code.
2915 */
1f26e28d 2916 ep->responded = 1;
adc82f77
RRD
2917
2918 if (dev->gadget.speed == USB_SPEED_SUPER) {
2919 handle_stat0_irqs_superspeed(dev, ep, u.r);
2920 goto next_endpoints;
2921 }
2922
1da177e4
LT
2923 switch (u.r.bRequest) {
2924 case USB_REQ_GET_STATUS: {
2925 struct net2280_ep *e;
320f3459 2926 __le32 status;
1da177e4
LT
2927
2928 /* hw handles device and interface status */
2929 if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
2930 goto delegate;
fae3c158
RRD
2931 e = get_ep_by_addr(dev, w_index);
2932 if (!e || w_length > 2)
1da177e4
LT
2933 goto do_stall;
2934
3e76fdcb 2935 if (readl(&e->regs->ep_rsp) & BIT(SET_ENDPOINT_HALT))
fae3c158 2936 status = cpu_to_le32(1);
1da177e4 2937 else
fae3c158 2938 status = cpu_to_le32(0);
1da177e4
LT
2939
2940 /* don't bother with a request object! */
fae3c158
RRD
2941 writel(0, &dev->epregs[0].ep_irqenb);
2942 set_fifo_bytecount(ep, w_length);
2943 writel((__force u32)status, &dev->epregs[0].ep_data);
2944 allow_status(ep);
e56e69cc 2945 ep_vdbg(dev, "%s stat %02x\n", ep->ep.name, status);
1da177e4
LT
2946 goto next_endpoints;
2947 }
2948 break;
2949 case USB_REQ_CLEAR_FEATURE: {
2950 struct net2280_ep *e;
2951
2952 /* hw handles device features */
2953 if (u.r.bRequestType != USB_RECIP_ENDPOINT)
2954 goto delegate;
ae8e530a 2955 if (w_value != USB_ENDPOINT_HALT || w_length != 0)
1da177e4 2956 goto do_stall;
fae3c158
RRD
2957 e = get_ep_by_addr(dev, w_index);
2958 if (!e)
1da177e4 2959 goto do_stall;
8066134f 2960 if (e->wedged) {
e56e69cc 2961 ep_vdbg(dev, "%s wedged, halt not cleared\n",
8066134f
AS
2962 ep->ep.name);
2963 } else {
e56e69cc 2964 ep_vdbg(dev, "%s clear halt\n", e->ep.name);
8066134f 2965 clear_halt(e);
2eeb0016 2966 if ((ep->dev->quirks & PLX_SUPERSPEED) &&
adc82f77
RRD
2967 !list_empty(&e->queue) && e->td_dma)
2968 restart_dma(e);
8066134f 2969 }
fae3c158 2970 allow_status(ep);
1da177e4
LT
2971 goto next_endpoints;
2972 }
2973 break;
2974 case USB_REQ_SET_FEATURE: {
2975 struct net2280_ep *e;
2976
2977 /* hw handles device features */
2978 if (u.r.bRequestType != USB_RECIP_ENDPOINT)
2979 goto delegate;
ae8e530a 2980 if (w_value != USB_ENDPOINT_HALT || w_length != 0)
1da177e4 2981 goto do_stall;
fae3c158
RRD
2982 e = get_ep_by_addr(dev, w_index);
2983 if (!e)
1da177e4 2984 goto do_stall;
8066134f
AS
2985 if (e->ep.name == ep0name)
2986 goto do_stall;
fae3c158 2987 set_halt(e);
2eeb0016 2988 if ((dev->quirks & PLX_SUPERSPEED) && e->dma)
adc82f77 2989 abort_dma(e);
fae3c158 2990 allow_status(ep);
e56e69cc 2991 ep_vdbg(dev, "%s set halt\n", ep->ep.name);
1da177e4
LT
2992 goto next_endpoints;
2993 }
2994 break;
2995 default:
2996delegate:
e56e69cc 2997 ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x "
1da177e4
LT
2998 "ep_cfg %08x\n",
2999 u.r.bRequestType, u.r.bRequest,
320f3459 3000 w_value, w_index, w_length,
adc82f77 3001 readl(&ep->cfg->ep_cfg));
1f26e28d 3002 ep->responded = 0;
fae3c158
RRD
3003 spin_unlock(&dev->lock);
3004 tmp = dev->driver->setup(&dev->gadget, &u.r);
3005 spin_lock(&dev->lock);
1da177e4
LT
3006 }
3007
3008 /* stall ep0 on error */
3009 if (tmp < 0) {
3010do_stall:
e56e69cc 3011 ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n",
1da177e4
LT
3012 u.r.bRequestType, u.r.bRequest, tmp);
3013 dev->protocol_stall = 1;
3014 }
3015
3016 /* some in/out token irq should follow; maybe stall then.
3017 * driver must queue a request (even zlp) or halt ep0
3018 * before the host times out.
3019 */
3020 }
3021
320f3459
DB
3022#undef w_value
3023#undef w_index
3024#undef w_length
3025
1da177e4
LT
3026next_endpoints:
3027 /* endpoint data irq ? */
3028 scratch = stat & 0x7f;
3029 stat &= ~0x7f;
3030 for (num = 0; scratch; num++) {
3031 u32 t;
3032
3033 /* do this endpoint's FIFO and queue need tending? */
3e76fdcb 3034 t = BIT(num);
1da177e4
LT
3035 if ((scratch & t) == 0)
3036 continue;
3037 scratch ^= t;
3038
fae3c158
RRD
3039 ep = &dev->ep[num];
3040 handle_ep_small(ep);
1da177e4
LT
3041 }
3042
3043 if (stat)
e56e69cc 3044 ep_dbg(dev, "unhandled irqstat0 %08x\n", stat);
1da177e4
LT
3045}
3046
3e76fdcb
RRD
3047#define DMA_INTERRUPTS (BIT(DMA_D_INTERRUPT) | \
3048 BIT(DMA_C_INTERRUPT) | \
3049 BIT(DMA_B_INTERRUPT) | \
3050 BIT(DMA_A_INTERRUPT))
1da177e4 3051#define PCI_ERROR_INTERRUPTS ( \
3e76fdcb
RRD
3052 BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT) | \
3053 BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT) | \
3054 BIT(PCI_RETRY_ABORT_INTERRUPT))
1da177e4 3055
fae3c158 3056static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
1da177e4
LT
3057{
3058 struct net2280_ep *ep;
3059 u32 tmp, num, mask, scratch;
3060
3061 /* after disconnect there's nothing else to do! */
3e76fdcb
RRD
3062 tmp = BIT(VBUS_INTERRUPT) | BIT(ROOT_PORT_RESET_INTERRUPT);
3063 mask = BIT(SUPER_SPEED) | BIT(HIGH_SPEED) | BIT(FULL_SPEED);
1da177e4
LT
3064
3065 /* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set.
fb914ebf 3066 * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and
901b3d75 3067 * both HIGH_SPEED and FULL_SPEED clear (as ROOT_PORT_RESET_INTERRUPT
1da177e4
LT
3068 * only indicates a change in the reset state).
3069 */
3070 if (stat & tmp) {
b611e424
AS
3071 bool reset = false;
3072 bool disconnect = false;
3073
3074 /*
3075 * Ignore disconnects and resets if the speed hasn't been set.
3076 * VBUS can bounce and there's always an initial reset.
3077 */
fae3c158 3078 writel(tmp, &dev->regs->irqstat1);
b611e424
AS
3079 if (dev->gadget.speed != USB_SPEED_UNKNOWN) {
3080 if ((stat & BIT(VBUS_INTERRUPT)) &&
3081 (readl(&dev->usb->usbctl) &
3082 BIT(VBUS_PIN)) == 0) {
3083 disconnect = true;
3084 ep_dbg(dev, "disconnect %s\n",
3085 dev->driver->driver.name);
3086 } else if ((stat & BIT(ROOT_PORT_RESET_INTERRUPT)) &&
3087 (readl(&dev->usb->usbstat) & mask)
3088 == 0) {
3089 reset = true;
3090 ep_dbg(dev, "reset %s\n",
3091 dev->driver->driver.name);
3092 }
3093
3094 if (disconnect || reset) {
3095 stop_activity(dev, dev->driver);
3096 ep0_start(dev);
3097 spin_unlock(&dev->lock);
3098 if (reset)
3099 usb_gadget_udc_reset
3100 (&dev->gadget, dev->driver);
3101 else
3102 (dev->driver->disconnect)
3103 (&dev->gadget);
3104 spin_lock(&dev->lock);
3105 return;
3106 }
1da177e4
LT
3107 }
3108 stat &= ~tmp;
3109
3110 /* vBUS can bounce ... one of many reasons to ignore the
3111 * notion of hotplug events on bus connect/disconnect!
3112 */
3113 if (!stat)
3114 return;
3115 }
3116
3117 /* NOTE: chip stays in PCI D0 state for now, but it could
3118 * enter D1 to save more power
3119 */
3e76fdcb 3120 tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT);
1da177e4 3121 if (stat & tmp) {
fae3c158 3122 writel(tmp, &dev->regs->irqstat1);
3e76fdcb 3123 if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) {
1da177e4 3124 if (dev->driver->suspend)
fae3c158 3125 dev->driver->suspend(&dev->gadget);
1da177e4 3126 if (!enable_suspend)
3e76fdcb 3127 stat &= ~BIT(SUSPEND_REQUEST_INTERRUPT);
1da177e4
LT
3128 } else {
3129 if (dev->driver->resume)
fae3c158 3130 dev->driver->resume(&dev->gadget);
1da177e4
LT
3131 /* at high speed, note erratum 0133 */
3132 }
3133 stat &= ~tmp;
3134 }
3135
3136 /* clear any other status/irqs */
3137 if (stat)
fae3c158 3138 writel(stat, &dev->regs->irqstat1);
1da177e4
LT
3139
3140 /* some status we can just ignore */
2eeb0016 3141 if (dev->quirks & PLX_2280)
3e76fdcb
RRD
3142 stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) |
3143 BIT(SUSPEND_REQUEST_INTERRUPT) |
3144 BIT(RESUME_INTERRUPT) |
3145 BIT(SOF_INTERRUPT));
950ee4c8 3146 else
3e76fdcb
RRD
3147 stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) |
3148 BIT(RESUME_INTERRUPT) |
3149 BIT(SOF_DOWN_INTERRUPT) |
3150 BIT(SOF_INTERRUPT));
950ee4c8 3151
1da177e4
LT
3152 if (!stat)
3153 return;
e56e69cc 3154 /* ep_dbg(dev, "irqstat1 %08x\n", stat);*/
1da177e4
LT
3155
3156 /* DMA status, for ep-{a,b,c,d} */
3157 scratch = stat & DMA_INTERRUPTS;
3158 stat &= ~DMA_INTERRUPTS;
3159 scratch >>= 9;
3160 for (num = 0; scratch; num++) {
3161 struct net2280_dma_regs __iomem *dma;
3162
3e76fdcb 3163 tmp = BIT(num);
1da177e4
LT
3164 if ((tmp & scratch) == 0)
3165 continue;
3166 scratch ^= tmp;
3167
fae3c158 3168 ep = &dev->ep[num + 1];
1da177e4
LT
3169 dma = ep->dma;
3170
3171 if (!dma)
3172 continue;
3173
3174 /* clear ep's dma status */
fae3c158
RRD
3175 tmp = readl(&dma->dmastat);
3176 writel(tmp, &dma->dmastat);
1da177e4 3177
adc82f77 3178 /* dma sync*/
2eeb0016 3179 if (dev->quirks & PLX_SUPERSPEED) {
adc82f77
RRD
3180 u32 r_dmacount = readl(&dma->dmacount);
3181 if (!ep->is_in && (r_dmacount & 0x00FFFFFF) &&
3e76fdcb 3182 (tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT)))
adc82f77
RRD
3183 continue;
3184 }
3185
90664198
RRD
3186 if (!(tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT))) {
3187 ep_dbg(ep->dev, "%s no xact done? %08x\n",
3188 ep->ep.name, tmp);
3189 continue;
1da177e4 3190 }
90664198 3191 stop_dma(ep->dma);
1da177e4
LT
3192
3193 /* OUT transfers terminate when the data from the
3194 * host is in our memory. Process whatever's done.
3195 * On this path, we know transfer's last packet wasn't
3196 * less than req->length. NAK_OUT_PACKETS may be set,
3197 * or the FIFO may already be holding new packets.
3198 *
3199 * IN transfers can linger in the FIFO for a very
3200 * long time ... we ignore that for now, accounting
3201 * precisely (like PIO does) needs per-packet irqs
3202 */
fae3c158 3203 scan_dma_completions(ep);
1da177e4
LT
3204
3205 /* disable dma on inactive queues; else maybe restart */
90664198 3206 if (!list_empty(&ep->queue)) {
fae3c158 3207 tmp = readl(&dma->dmactl);
90664198 3208 restart_dma(ep);
1da177e4
LT
3209 }
3210 ep->irqs++;
3211 }
3212
3213 /* NOTE: there are other PCI errors we might usefully notice.
3214 * if they appear very often, here's where to try recovering.
3215 */
3216 if (stat & PCI_ERROR_INTERRUPTS) {
e56e69cc 3217 ep_err(dev, "pci dma error; stat %08x\n", stat);
1da177e4
LT
3218 stat &= ~PCI_ERROR_INTERRUPTS;
3219 /* these are fatal errors, but "maybe" they won't
3220 * happen again ...
3221 */
fae3c158
RRD
3222 stop_activity(dev, dev->driver);
3223 ep0_start(dev);
1da177e4
LT
3224 stat = 0;
3225 }
3226
3227 if (stat)
e56e69cc 3228 ep_dbg(dev, "unhandled irqstat1 %08x\n", stat);
1da177e4
LT
3229}
3230
fae3c158 3231static irqreturn_t net2280_irq(int irq, void *_dev)
1da177e4
LT
3232{
3233 struct net2280 *dev = _dev;
3234
658ad5e0 3235 /* shared interrupt, not ours */
2eeb0016 3236 if ((dev->quirks & PLX_LEGACY) &&
3e76fdcb 3237 (!(readl(&dev->regs->irqstat0) & BIT(INTA_ASSERTED))))
658ad5e0
AS
3238 return IRQ_NONE;
3239
fae3c158 3240 spin_lock(&dev->lock);
1da177e4
LT
3241
3242 /* handle disconnect, dma, and more */
fae3c158 3243 handle_stat1_irqs(dev, readl(&dev->regs->irqstat1));
1da177e4
LT
3244
3245 /* control requests and PIO */
fae3c158 3246 handle_stat0_irqs(dev, readl(&dev->regs->irqstat0));
1da177e4 3247
2eeb0016 3248 if (dev->quirks & PLX_SUPERSPEED) {
adc82f77
RRD
3249 /* re-enable interrupt to trigger any possible new interrupt */
3250 u32 pciirqenb1 = readl(&dev->regs->pciirqenb1);
3251 writel(pciirqenb1 & 0x7FFFFFFF, &dev->regs->pciirqenb1);
3252 writel(pciirqenb1, &dev->regs->pciirqenb1);
3253 }
3254
fae3c158 3255 spin_unlock(&dev->lock);
1da177e4
LT
3256
3257 return IRQ_HANDLED;
3258}
3259
3260/*-------------------------------------------------------------------------*/
3261
fae3c158 3262static void gadget_release(struct device *_dev)
1da177e4 3263{
fae3c158 3264 struct net2280 *dev = dev_get_drvdata(_dev);
1da177e4 3265
fae3c158 3266 kfree(dev);
1da177e4
LT
3267}
3268
3269/* tear down the binding between this driver and the pci device */
3270
fae3c158 3271static void net2280_remove(struct pci_dev *pdev)
1da177e4 3272{
fae3c158 3273 struct net2280 *dev = pci_get_drvdata(pdev);
1da177e4 3274
0f91349b
SAS
3275 usb_del_gadget_udc(&dev->gadget);
3276
6bea476c 3277 BUG_ON(dev->driver);
1da177e4
LT
3278
3279 /* then clean up the resources we allocated during probe() */
fae3c158 3280 net2280_led_shutdown(dev);
1da177e4
LT
3281 if (dev->requests) {
3282 int i;
3283 for (i = 1; i < 5; i++) {
fae3c158 3284 if (!dev->ep[i].dummy)
1da177e4 3285 continue;
fae3c158
RRD
3286 pci_pool_free(dev->requests, dev->ep[i].dummy,
3287 dev->ep[i].td_dma);
1da177e4 3288 }
fae3c158 3289 pci_pool_destroy(dev->requests);
1da177e4
LT
3290 }
3291 if (dev->got_irq)
fae3c158 3292 free_irq(pdev->irq, dev);
9c864c23 3293 if (dev->quirks & PLX_SUPERSPEED)
adc82f77 3294 pci_disable_msi(pdev);
1da177e4 3295 if (dev->regs)
fae3c158 3296 iounmap(dev->regs);
1da177e4 3297 if (dev->region)
fae3c158
RRD
3298 release_mem_region(pci_resource_start(pdev, 0),
3299 pci_resource_len(pdev, 0));
1da177e4 3300 if (dev->enabled)
fae3c158
RRD
3301 pci_disable_device(pdev);
3302 device_remove_file(&pdev->dev, &dev_attr_registers);
1da177e4 3303
e56e69cc 3304 ep_info(dev, "unbind\n");
1da177e4
LT
3305}
3306
3307/* wrap this driver around the specified device, but
3308 * don't respond over USB until a gadget driver binds to us.
3309 */
3310
fae3c158 3311static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1da177e4
LT
3312{
3313 struct net2280 *dev;
3314 unsigned long resource, len;
3315 void __iomem *base = NULL;
3316 int retval, i;
1da177e4 3317
1da177e4 3318 /* alloc, and start init */
fae3c158
RRD
3319 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3320 if (dev == NULL) {
1da177e4
LT
3321 retval = -ENOMEM;
3322 goto done;
3323 }
3324
fae3c158
RRD
3325 pci_set_drvdata(pdev, dev);
3326 spin_lock_init(&dev->lock);
2eeb0016 3327 dev->quirks = id->driver_data;
1da177e4
LT
3328 dev->pdev = pdev;
3329 dev->gadget.ops = &net2280_ops;
2eeb0016 3330 dev->gadget.max_speed = (dev->quirks & PLX_SUPERSPEED) ?
adc82f77 3331 USB_SPEED_SUPER : USB_SPEED_HIGH;
1da177e4
LT
3332
3333 /* the "gadget" abstracts/virtualizes the controller */
1da177e4
LT
3334 dev->gadget.name = driver_name;
3335
3336 /* now all the pci goodies ... */
fae3c158
RRD
3337 if (pci_enable_device(pdev) < 0) {
3338 retval = -ENODEV;
1da177e4
LT
3339 goto done;
3340 }
3341 dev->enabled = 1;
3342
3343 /* BAR 0 holds all the registers
3344 * BAR 1 is 8051 memory; unused here (note erratum 0103)
3345 * BAR 2 is fifo memory; unused here
3346 */
fae3c158
RRD
3347 resource = pci_resource_start(pdev, 0);
3348 len = pci_resource_len(pdev, 0);
3349 if (!request_mem_region(resource, len, driver_name)) {
e56e69cc 3350 ep_dbg(dev, "controller already in use\n");
1da177e4
LT
3351 retval = -EBUSY;
3352 goto done;
3353 }
3354 dev->region = 1;
3355
901b3d75
DB
3356 /* FIXME provide firmware download interface to put
3357 * 8051 code into the chip, e.g. to turn on PCI PM.
3358 */
3359
fae3c158 3360 base = ioremap_nocache(resource, len);
1da177e4 3361 if (base == NULL) {
e56e69cc 3362 ep_dbg(dev, "can't map memory\n");
1da177e4
LT
3363 retval = -EFAULT;
3364 goto done;
3365 }
3366 dev->regs = (struct net2280_regs __iomem *) base;
3367 dev->usb = (struct net2280_usb_regs __iomem *) (base + 0x0080);
3368 dev->pci = (struct net2280_pci_regs __iomem *) (base + 0x0100);
3369 dev->dma = (struct net2280_dma_regs __iomem *) (base + 0x0180);
3370 dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200);
3371 dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300);
3372
2eeb0016 3373 if (dev->quirks & PLX_SUPERSPEED) {
adc82f77
RRD
3374 u32 fsmvalue;
3375 u32 usbstat;
3376 dev->usb_ext = (struct usb338x_usb_ext_regs __iomem *)
3377 (base + 0x00b4);
3378 dev->fiforegs = (struct usb338x_fifo_regs __iomem *)
3379 (base + 0x0500);
3380 dev->llregs = (struct usb338x_ll_regs __iomem *)
3381 (base + 0x0700);
3382 dev->ll_lfps_regs = (struct usb338x_ll_lfps_regs __iomem *)
3383 (base + 0x0748);
3384 dev->ll_tsn_regs = (struct usb338x_ll_tsn_regs __iomem *)
3385 (base + 0x077c);
3386 dev->ll_chicken_reg = (struct usb338x_ll_chi_regs __iomem *)
3387 (base + 0x079c);
3388 dev->plregs = (struct usb338x_pl_regs __iomem *)
3389 (base + 0x0800);
3390 usbstat = readl(&dev->usb->usbstat);
fae3c158 3391 dev->enhanced_mode = !!(usbstat & BIT(11));
adc82f77
RRD
3392 dev->n_ep = (dev->enhanced_mode) ? 9 : 5;
3393 /* put into initial config, link up all endpoints */
3394 fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
3395 (0xf << DEFECT7374_FSM_FIELD);
3396 /* See if firmware needs to set up for workaround: */
5517525e
RRD
3397 if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ) {
3398 dev->bug7734_patched = 1;
adc82f77 3399 writel(0, &dev->usb->usbctl);
5517525e
RRD
3400 } else
3401 dev->bug7734_patched = 0;
3402 } else {
adc82f77
RRD
3403 dev->enhanced_mode = 0;
3404 dev->n_ep = 7;
3405 /* put into initial config, link up all endpoints */
3406 writel(0, &dev->usb->usbctl);
3407 }
3408
	usb_reset(dev);
	usb_reinit(dev);

	/* irq setup after old hardware is cleaned up */
	if (!pdev->irq) {
		ep_err(dev, "No IRQ. Check PCI setup!\n");
		retval = -ENODEV;
		goto done;
	}

	if (dev->quirks & PLX_SUPERSPEED)
		if (pci_enable_msi(pdev))
			ep_err(dev, "Failed to enable MSI mode\n");

	if (request_irq(pdev->irq, net2280_irq, IRQF_SHARED,
							driver_name, dev)) {
		ep_err(dev, "request interrupt %d failed\n", pdev->irq);
		retval = -EBUSY;
		goto done;
	}
	dev->got_irq = 1;

	/* DMA setup */
	/* NOTE: we know only the 32 LSBs of dma addresses may be nonzero */
	dev->requests = pci_pool_create("requests", pdev,
		sizeof(struct net2280_dma),
		0 /* no alignment requirements */,
		0 /* or page-crossing issues */);
	if (!dev->requests) {
		ep_dbg(dev, "can't get request pool\n");
		retval = -ENOMEM;
		goto done;
	}
	for (i = 1; i < 5; i++) {
		struct net2280_dma	*td;

		td = pci_pool_alloc(dev->requests, GFP_KERNEL,
				&dev->ep[i].td_dma);
		if (!td) {
			ep_dbg(dev, "can't get dummy %d\n", i);
			retval = -ENOMEM;
			goto done;
		}
		td->dmacount = 0;	/* not VALID */
		td->dmadesc = td->dmaaddr;
		dev->ep[i].dummy = td;
	}
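
	/*
	 * A note on the loop above: each DMA-capable endpoint gets one
	 * "dummy" descriptor whose dmacount is left clear (not VALID), so
	 * the DMA engine always has a benign placeholder to point at when
	 * no request is queued.  This reading is an interpretation of the
	 * surrounding code, not a statement from the hardware manual.
	 */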

	/* enable lower-overhead pci memory bursts during DMA */
	if (dev->quirks & PLX_LEGACY)
		writel(BIT(DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE) |
			/*
			 * 256 write retries may not be enough...
			   BIT(PCI_RETRY_ABORT_ENABLE) |
			 */
			BIT(DMA_READ_MULTIPLE_ENABLE) |
			BIT(DMA_READ_LINE_ENABLE),
			&dev->pci->pcimstctl);
	/* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */
	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	/* ... also flushes any posted pci writes */
	dev->chiprev = get_idx_reg(dev->regs, REG_CHIPREV) & 0xffff;

	/* done */
	ep_info(dev, "%s\n", driver_desc);
	ep_info(dev, "irq %d, pci mem %p, chip rev %04x\n",
			pdev->irq, base, dev->chiprev);
	ep_info(dev, "version: " DRIVER_VERSION "; %s\n",
			dev->enhanced_mode ? "enhanced mode" : "legacy mode");
	retval = device_create_file(&pdev->dev, &dev_attr_registers);
	if (retval)
		goto done;

	retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget,
			gadget_release);
	if (retval)
		goto done;
	return 0;

done:
	if (dev)
		net2280_remove(pdev);
	return retval;
}
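
/*
 * Every error path above funnels through the single "done:" label and hands
 * partially-initialized state to net2280_remove().  That only works because
 * remove is written to tolerate partial setup, along these lines (a rough
 * sketch of the pattern, not the exact code found earlier in this file):
 *
 *	if (dev->got_irq)
 *		free_irq(pdev->irq, dev);
 *	if (dev->region)
 *		release_mem_region(pci_resource_start(pdev, 0),
 *				   pci_resource_len(pdev, 0));
 */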

/* make sure the board is quiescent; otherwise it will continue
 * generating IRQs across the upcoming reboot.
 */

static void net2280_shutdown(struct pci_dev *pdev)
{
	struct net2280	*dev = pci_get_drvdata(pdev);

	/* disable IRQs */
	writel(0, &dev->regs->pciirqenb0);
	writel(0, &dev->regs->pciirqenb1);

	/* disable the pullup so the host will think we're gone */
	writel(0, &dev->usb->usbctl);

}


/*-------------------------------------------------------------------------*/

static const struct pci_device_id pci_ids[] = { {
	.class =	((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
	.class_mask =	~0,
	.vendor =	PCI_VENDOR_ID_PLX_LEGACY,
	.device =	0x2280,
	.subvendor =	PCI_ANY_ID,
	.subdevice =	PCI_ANY_ID,
	.driver_data =	PLX_LEGACY | PLX_2280,
	}, {
	.class =	((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
	.class_mask =	~0,
	.vendor =	PCI_VENDOR_ID_PLX_LEGACY,
	.device =	0x2282,
	.subvendor =	PCI_ANY_ID,
	.subdevice =	PCI_ANY_ID,
	.driver_data =	PLX_LEGACY,
	},
	{
	.class =	((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
	.class_mask =	~0,
	.vendor =	PCI_VENDOR_ID_PLX,
	.device =	0x3380,
	.subvendor =	PCI_ANY_ID,
	.subdevice =	PCI_ANY_ID,
	.driver_data =	PLX_SUPERSPEED,
	},
	{
	.class =	((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
	.class_mask =	~0,
	.vendor =	PCI_VENDOR_ID_PLX,
	.device =	0x3382,
	.subvendor =	PCI_ANY_ID,
	.subdevice =	PCI_ANY_ID,
	.driver_data =	PLX_SUPERSPEED,
	},
{ /* end: all zeroes */ }
};
MODULE_DEVICE_TABLE(pci, pci_ids);
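
/*
 * The driver_data field in each entry above carries the PLX_* quirk flags;
 * probe typically latches them from the matched id so later code can test
 * dev->quirks, roughly like this (a sketch, not the exact probe code found
 * earlier in this file):
 *
 *	static int net2280_probe(struct pci_dev *pdev,
 *				 const struct pci_device_id *id)
 *	{
 *		...
 *		dev->quirks = id->driver_data;
 *		...
 *	}
 */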

/* pci driver glue; this is a "new style" PCI driver module */
static struct pci_driver net2280_pci_driver = {
	.name =		(char *) driver_name,
	.id_table =	pci_ids,

	.probe =	net2280_probe,
	.remove =	net2280_remove,
	.shutdown =	net2280_shutdown,

	/* FIXME add power management support */
};

module_pci_driver(net2280_pci_driver);
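/*
 * module_pci_driver() above is shorthand; it expands to roughly the classic
 * init/exit boilerplate:
 *
 *	static int __init net2280_pci_driver_init(void)
 *	{
 *		return pci_register_driver(&net2280_pci_driver);
 *	}
 *	module_init(net2280_pci_driver_init);
 *
 *	static void __exit net2280_pci_driver_exit(void)
 *	{
 *		pci_unregister_driver(&net2280_pci_driver);
 *	}
 *	module_exit(net2280_pci_driver_exit);
 */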

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR("David Brownell");
MODULE_LICENSE("GPL");