/*
 * Driver for the PLX NET2280 USB device controller.
 * Specs and errata are available from <http://www.plxtech.com>.
 *
 * PLX Technology Inc. (formerly NetChip Technology) supported the
 * development of this driver.
 *
 *
 * CODE STATUS HIGHLIGHTS
 *
 * This driver should work well with most "gadget" drivers, including
 * the Mass Storage, Serial, and Ethernet/RNDIS gadget drivers
 * as well as Gadget Zero and Gadgetfs.
 *
 * DMA is enabled by default.  Drivers using transfer queues might use
 * DMA chaining to remove IRQ latencies between transfers.  (Except when
 * short OUT transfers happen.)  Drivers can use the req->no_interrupt
 * hint to completely eliminate some IRQs, if a later IRQ is guaranteed
 * and DMA chaining is enabled.
 *
 * MSI is enabled by default.  The legacy IRQ is used if MSI couldn't
 * be enabled.
 *
 * Note that almost all the errata workarounds here are only needed for
 * rev1 chips.  Rev1a silicon (0110) fixes almost all of them.
 */

/*
 * Copyright (C) 2003 David Brownell
 * Copyright (C) 2003-2005 PLX Technology, Inc.
 * Copyright (C) 2014 Ricardo Ribalda - Qtechnology/AS
 *
 * Modified Seth Levy 2005 PLX Technology, Inc. to provide compatibility
 *	with 2282 chip
 *
 * Modified Ricardo Ribalda Qtechnology AS to provide compatibility
 *	with usb 338x chip. Based on PLX driver
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/list.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/prefetch.h>
#include <linux/io.h>

#include <asm/byteorder.h>
#include <asm/irq.h>
#include <asm/unaligned.h>

#define	DRIVER_DESC		"PLX NET228x/USB338x USB Peripheral Controller"
#define	DRIVER_VERSION		"2005 Sept 27/v3.0"

#define	EP_DONTUSE		13	/* nonzero */

#define USE_RDK_LEDS		/* GPIO pins control three LEDs */


static const char driver_name[] = "net2280";
static const char driver_desc[] = DRIVER_DESC;

static const u32 ep_bit[9] = { 0, 17, 2, 19, 4, 1, 18, 3, 20 };
static const char ep0name[] = "ep0";
static const char *const ep_name[] = {
	ep0name,
	"ep-a", "ep-b", "ep-c", "ep-d",
	"ep-e", "ep-f", "ep-g", "ep-h",
};

/* use_dma -- general goodness, fewer interrupts, less cpu load (vs PIO)
 * use_dma_chaining -- dma descriptor queueing gives even more irq reduction
 *
 * The net2280 DMA engines are not tightly integrated with their FIFOs;
 * not all cases are (yet) handled well in this driver or the silicon.
 * Some gadget drivers work better with the dma support here than others.
 * These two parameters let you use PIO or more aggressive DMA.
 */
static bool use_dma = true;
static bool use_dma_chaining;
static bool use_msi = true;

/* "modprobe net2280 use_dma=n" etc */
module_param(use_dma, bool, 0444);
module_param(use_dma_chaining, bool, 0444);
module_param(use_msi, bool, 0444);

/* mode 0 == ep-{a,b,c,d} 1K fifo each
 * mode 1 == ep-{a,b} 2K fifo each, ep-{c,d} unavailable
 * mode 2 == ep-a 2K fifo, ep-{b,c} 1K each, ep-d unavailable
 */
static ushort fifo_mode;

/* "modprobe net2280 fifo_mode=1" etc */
module_param(fifo_mode, ushort, 0644);

/* enable_suspend -- When enabled, the driver will respond to
 * USB suspend requests by powering down the NET2280.  Otherwise,
 * USB suspend requests will be ignored.  This is acceptable for
 * self-powered devices.
 */
static bool enable_suspend;

/* "modprobe net2280 enable_suspend=1" etc */
module_param(enable_suspend, bool, 0444);

/* force full-speed operation */
static bool full_speed;
module_param(full_speed, bool, 0444);
MODULE_PARM_DESC(full_speed, "force full-speed mode -- for testing only!");
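
/*
 * The parameters above can also be combined on one load, for example
 * (an illustrative command line, not taken from this file):
 *	modprobe net2280 use_dma=0 fifo_mode=1
 * The 0444/0644 permissions expose the current values under
 * /sys/module/net2280/parameters/; only fifo_mode is writable there.
 */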

#define	DIR_STRING(bAddress) (((bAddress) & USB_DIR_IN) ? "in" : "out")

static char *type_string(u8 bmAttributes)
{
	switch ((bmAttributes) & USB_ENDPOINT_XFERTYPE_MASK) {
	case USB_ENDPOINT_XFER_BULK:	return "bulk";
	case USB_ENDPOINT_XFER_ISOC:	return "iso";
	case USB_ENDPOINT_XFER_INT:	return "intr";
	}
	return "control";
}

#include "net2280.h"

#define valid_bit	cpu_to_le32(BIT(VALID_BIT))
#define dma_done_ie	cpu_to_le32(BIT(DMA_DONE_INTERRUPT_ENABLE))

/*-------------------------------------------------------------------------*/
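/* route this endpoint's interrupt through to PCI: legacy 228x parts
 * index pciirqenb0 by endpoint number, 338x parts use the ep_bit[]
 * mapping defined above
 */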
static inline void enable_pciirqenb(struct net2280_ep *ep)
{
	u32 tmp = readl(&ep->dev->regs->pciirqenb0);

	if (ep->dev->quirks & PLX_LEGACY)
		tmp |= BIT(ep->num);
	else
		tmp |= BIT(ep_bit[ep->num]);
	writel(tmp, &ep->dev->regs->pciirqenb0);

	return;
}

static int
net2280_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
{
	struct net2280		*dev;
	struct net2280_ep	*ep;
	u32			max, tmp;
	unsigned long		flags;
	static const u32 ep_key[9] = { 1, 0, 1, 0, 1, 1, 0, 1, 0 };

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || !desc || ep->desc || _ep->name == ep0name ||
			desc->bDescriptorType != USB_DT_ENDPOINT)
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* erratum 0119 workaround ties up an endpoint number */
	if ((desc->bEndpointAddress & 0x0f) == EP_DONTUSE)
		return -EDOM;

	if (dev->quirks & PLX_SUPERSPEED) {
		if ((desc->bEndpointAddress & 0x0f) >= 0x0c)
			return -EDOM;
		ep->is_in = !!usb_endpoint_dir_in(desc);
		if (dev->enhanced_mode && ep->is_in && ep_key[ep->num])
			return -EINVAL;
	}

	/* sanity check ep-e/ep-f since their fifos are small */
	max = usb_endpoint_maxp(desc) & 0x1fff;
	if (ep->num > 4 && max > 64 && (dev->quirks & PLX_LEGACY))
		return -ERANGE;

	spin_lock_irqsave(&dev->lock, flags);
	_ep->maxpacket = max & 0x7ff;
	ep->desc = desc;

	/* ep_reset() has already been called */
	ep->stopped = 0;
	ep->wedged = 0;
	ep->out_overflow = 0;

	/* set speed-dependent max packet; may kick in high bandwidth */
	set_max_speed(ep, max);

	/* FIFO lines can't go to different packets.  PIO is ok, so
	 * use it instead of troublesome (non-bulk) multi-packet DMA.
	 */
	if (ep->dma && (max % 4) != 0 && use_dma_chaining) {
		ep_dbg(ep->dev, "%s, no dma for maxpacket %d\n",
			ep->ep.name, ep->ep.maxpacket);
		ep->dma = NULL;
	}

	/* set type, direction, address; reset fifo counters */
	writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
	tmp = (desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK);
	if (tmp == USB_ENDPOINT_XFER_INT) {
		/* erratum 0105 workaround prevents hs NYET */
		if (dev->chiprev == 0100 &&
				dev->gadget.speed == USB_SPEED_HIGH &&
				!(desc->bEndpointAddress & USB_DIR_IN))
			writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE),
				&ep->regs->ep_rsp);
	} else if (tmp == USB_ENDPOINT_XFER_BULK) {
		/* catch some particularly blatant driver bugs */
		if ((dev->gadget.speed == USB_SPEED_SUPER && max != 1024) ||
		    (dev->gadget.speed == USB_SPEED_HIGH && max != 512) ||
		    (dev->gadget.speed == USB_SPEED_FULL && max > 64)) {
			spin_unlock_irqrestore(&dev->lock, flags);
			return -ERANGE;
		}
	}
	ep->is_iso = (tmp == USB_ENDPOINT_XFER_ISOC);
	/* Enable this endpoint */
	if (dev->quirks & PLX_LEGACY) {
		tmp <<= ENDPOINT_TYPE;
		tmp |= desc->bEndpointAddress;
		/* default full fifo lines */
		tmp |= (4 << ENDPOINT_BYTE_COUNT);
		tmp |= BIT(ENDPOINT_ENABLE);
		ep->is_in = (tmp & USB_DIR_IN) != 0;
	} else {
		/* In Legacy mode, only OUT endpoints are used */
		if (dev->enhanced_mode && ep->is_in) {
			tmp <<= IN_ENDPOINT_TYPE;
			tmp |= BIT(IN_ENDPOINT_ENABLE);
			/* Not applicable to Legacy */
			tmp |= BIT(ENDPOINT_DIRECTION);
		} else {
			tmp <<= OUT_ENDPOINT_TYPE;
			tmp |= BIT(OUT_ENDPOINT_ENABLE);
			tmp |= (ep->is_in << ENDPOINT_DIRECTION);
		}

		tmp |= usb_endpoint_num(desc);
		tmp |= (ep->ep.maxburst << MAX_BURST_SIZE);
	}

	/* Make sure all the registers are written before ep_rsp */
	wmb();

	/* for OUT transfers, block the rx fifo until a read is posted */
	if (!ep->is_in)
		writel(BIT(SET_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
	else if (!(dev->quirks & PLX_2280)) {
		/* Added for 2282, Don't use nak packets on an in endpoint,
		 * this was ignored on 2280
		 */
		writel(BIT(CLEAR_NAK_OUT_PACKETS) |
			BIT(CLEAR_NAK_OUT_PACKETS_MODE), &ep->regs->ep_rsp);
	}

	writel(tmp, &ep->cfg->ep_cfg);

	/* enable irqs */
	if (!ep->dma) {				/* pio, per-packet */
		enable_pciirqenb(ep);

		tmp = BIT(DATA_PACKET_RECEIVED_INTERRUPT_ENABLE) |
			BIT(DATA_PACKET_TRANSMITTED_INTERRUPT_ENABLE);
		if (dev->quirks & PLX_2280)
			tmp |= readl(&ep->regs->ep_irqenb);
		writel(tmp, &ep->regs->ep_irqenb);
	} else {				/* dma, per-request */
		tmp = BIT((8 + ep->num));	/* completion */
		tmp |= readl(&dev->regs->pciirqenb1);
		writel(tmp, &dev->regs->pciirqenb1);

		/* for short OUT transfers, dma completions can't
		 * advance the queue; do it pio-style, by hand.
		 * NOTE erratum 0112 workaround #2
		 */
		if ((desc->bEndpointAddress & USB_DIR_IN) == 0) {
			tmp = BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT_ENABLE);
			writel(tmp, &ep->regs->ep_irqenb);

			enable_pciirqenb(ep);
		}
	}

	tmp = desc->bEndpointAddress;
	ep_dbg(dev, "enabled %s (ep%d%s-%s) %s max %04x\n",
		_ep->name, tmp & 0x0f, DIR_STRING(tmp),
		type_string(desc->bmAttributes),
		ep->dma ? "dma" : "pio", max);

	/* pci writes may still be posted */
	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}

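/* poll a register until (value & mask) == done, for at most @usec
 * microseconds; returns 0 on match, -ENODEV if the chip reads back as
 * all ones (unplugged or powered off), or -ETIMEDOUT
 */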
static int handshake(u32 __iomem *ptr, u32 mask, u32 done, int usec)
{
	u32	result;

	do {
		result = readl(ptr);
		if (result == ~(u32)0)		/* "device unplugged" */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}

static const struct usb_ep_ops net2280_ep_ops;

static void ep_reset_228x(struct net2280_regs __iomem *regs,
			  struct net2280_ep *ep)
{
	u32	tmp;

	ep->desc = NULL;
	INIT_LIST_HEAD(&ep->queue);

	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
	ep->ep.ops = &net2280_ep_ops;

	/* disable the dma, irqs, endpoint... */
	if (ep->dma) {
		writel(0, &ep->dma->dmactl);
		writel(BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
			BIT(DMA_TRANSACTION_DONE_INTERRUPT) |
			BIT(DMA_ABORT),
			&ep->dma->dmastat);

		tmp = readl(&regs->pciirqenb0);
		tmp &= ~BIT(ep->num);
		writel(tmp, &regs->pciirqenb0);
	} else {
		tmp = readl(&regs->pciirqenb1);
		tmp &= ~BIT((8 + ep->num));	/* completion */
		writel(tmp, &regs->pciirqenb1);
	}
	writel(0, &ep->regs->ep_irqenb);

	/* init to our chosen defaults, notably so that we NAK OUT
	 * packets until the driver queues a read (+note erratum 0112)
	 */
	if (!ep->is_in || (ep->dev->quirks & PLX_2280)) {
		tmp = BIT(SET_NAK_OUT_PACKETS_MODE) |
			BIT(SET_NAK_OUT_PACKETS) |
			BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
			BIT(CLEAR_INTERRUPT_MODE);
	} else {
		/* added for 2282 */
		tmp = BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
			BIT(CLEAR_NAK_OUT_PACKETS) |
			BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
			BIT(CLEAR_INTERRUPT_MODE);
	}

	if (ep->num != 0) {
		tmp |= BIT(CLEAR_ENDPOINT_TOGGLE) |
			BIT(CLEAR_ENDPOINT_HALT);
	}
	writel(tmp, &ep->regs->ep_rsp);

	/* scrub most status bits, and flush any fifo state */
	if (ep->dev->quirks & PLX_2280)
		tmp = BIT(FIFO_OVERFLOW) |
			BIT(FIFO_UNDERFLOW);
	else
		tmp = 0;

	writel(tmp | BIT(TIMEOUT) |
		BIT(USB_STALL_SENT) |
		BIT(USB_IN_NAK_SENT) |
		BIT(USB_IN_ACK_RCVD) |
		BIT(USB_OUT_PING_NAK_SENT) |
		BIT(USB_OUT_ACK_SENT) |
		BIT(FIFO_FLUSH) |
		BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
		BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
		BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
		BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
		BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
		BIT(DATA_IN_TOKEN_INTERRUPT),
		&ep->regs->ep_stat);

	/* fifo size is handled separately */
}

static void ep_reset_338x(struct net2280_regs __iomem *regs,
					struct net2280_ep *ep)
{
	u32 tmp, dmastat;

	ep->desc = NULL;
	INIT_LIST_HEAD(&ep->queue);

	usb_ep_set_maxpacket_limit(&ep->ep, ~0);
	ep->ep.ops = &net2280_ep_ops;

	/* disable the dma, irqs, endpoint... */
	if (ep->dma) {
		writel(0, &ep->dma->dmactl);
		writel(BIT(DMA_ABORT_DONE_INTERRUPT) |
		       BIT(DMA_PAUSE_DONE_INTERRUPT) |
		       BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
		       BIT(DMA_TRANSACTION_DONE_INTERRUPT),
		       /* | BIT(DMA_ABORT), */
		       &ep->dma->dmastat);

		dmastat = readl(&ep->dma->dmastat);
		if (dmastat == 0x5002) {
			ep_warn(ep->dev, "The dmastat return = %x!!\n",
				dmastat);
			writel(0x5a, &ep->dma->dmastat);
		}

		tmp = readl(&regs->pciirqenb0);
		tmp &= ~BIT(ep_bit[ep->num]);
		writel(tmp, &regs->pciirqenb0);
	} else {
		if (ep->num < 5) {
			tmp = readl(&regs->pciirqenb1);
			tmp &= ~BIT((8 + ep->num));	/* completion */
			writel(tmp, &regs->pciirqenb1);
		}
	}
	writel(0, &ep->regs->ep_irqenb);

	writel(BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
	       BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
	       BIT(FIFO_OVERFLOW) |
	       BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
	       BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
	       BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
	       BIT(DATA_IN_TOKEN_INTERRUPT), &ep->regs->ep_stat);
}

static void nuke(struct net2280_ep *);

static int net2280_disable(struct usb_ep *_ep)
{
	struct net2280_ep	*ep;
	unsigned long		flags;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || !ep->desc || _ep->name == ep0name)
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, flags);
	nuke(ep);

	if (ep->dev->quirks & PLX_SUPERSPEED)
		ep_reset_338x(ep->dev->regs, ep);
	else
		ep_reset_228x(ep->dev->regs, ep);

	ep_vdbg(ep->dev, "disabled %s %s\n",
			ep->dma ? "dma" : "pio", _ep->name);

	/* synch memory views with the device */
	(void)readl(&ep->cfg->ep_cfg);

	if (use_dma && !ep->dma && ep->num >= 1 && ep->num <= 4)
		ep->dma = &ep->dev->dma[ep->num - 1];

	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return 0;
}

/*-------------------------------------------------------------------------*/

static struct usb_request
*net2280_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags)
{
	struct net2280_ep	*ep;
	struct net2280_request	*req;

	if (!_ep)
		return NULL;
	ep = container_of(_ep, struct net2280_ep, ep);

	req = kzalloc(sizeof(*req), gfp_flags);
	if (!req)
		return NULL;

	INIT_LIST_HEAD(&req->queue);

	/* this dma descriptor may be swapped with the previous dummy */
	if (ep->dma) {
		struct net2280_dma	*td;

		td = pci_pool_alloc(ep->dev->requests, gfp_flags,
				&req->td_dma);
		if (!td) {
			kfree(req);
			return NULL;
		}
		td->dmacount = 0;	/* not VALID */
		td->dmadesc = td->dmaaddr;
		req->td = td;
	}
	return &req->req;
}

static void net2280_free_request(struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2280_ep	*ep;
	struct net2280_request	*req;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || !_req)
		return;

	req = container_of(_req, struct net2280_request, req);
	WARN_ON(!list_empty(&req->queue));
	if (req->td)
		pci_pool_free(ep->dev->requests, req->td, req->td_dma);
	kfree(req);
}

/*-------------------------------------------------------------------------*/

/* load a packet into the fifo we use for usb IN transfers.
 * works for all endpoints.
 *
 * NOTE: pio with ep-a..ep-d could stuff multiple packets into the fifo
 * at a time, but this code is simpler because it knows it only writes
 * one packet.  ep-a..ep-d should use dma instead.
 */
static void write_fifo(struct net2280_ep *ep, struct usb_request *req)
{
	struct net2280_ep_regs	__iomem *regs = ep->regs;
	u8			*buf;
	u32			tmp;
	unsigned		count, total;

	/* INVARIANT:  fifo is currently empty. (testable) */

	if (req) {
		buf = req->buf + req->actual;
		prefetch(buf);
		total = req->length - req->actual;
	} else {
		total = 0;
		buf = NULL;
	}

	/* write just one packet at a time */
	count = ep->ep.maxpacket;
	if (count > total)	/* min() cannot be used on a bitfield */
		count = total;

	ep_vdbg(ep->dev, "write %s fifo (IN) %d bytes%s req %p\n",
			ep->ep.name, count,
			(count != ep->ep.maxpacket) ? " (short)" : "",
			req);
	while (count >= 4) {
		/* NOTE be careful if you try to align these. fifo lines
		 * should normally be full (4 bytes) and successive partial
		 * lines are ok only in certain cases.
		 */
		tmp = get_unaligned((u32 *)buf);
		cpu_to_le32s(&tmp);
		writel(tmp, &regs->ep_data);
		buf += 4;
		count -= 4;
	}

	/* last fifo entry is "short" unless we wrote a full packet.
	 * also explicitly validate last word in (periodic) transfers
	 * when maxpacket is not a multiple of 4 bytes.
	 */
	if (count || total < ep->ep.maxpacket) {
		tmp = count ? get_unaligned((u32 *)buf) : count;
		cpu_to_le32s(&tmp);
		set_fifo_bytecount(ep, count & 0x03);
		writel(tmp, &regs->ep_data);
	}

	/* pci writes may still be posted */
}

/* work around erratum 0106: PCI and USB race over the OUT fifo.
 * caller guarantees chiprev 0100, out endpoint is NAKing, and
 * there's no real data in the fifo.
 *
 * NOTE:  also used in cases where that erratum doesn't apply:
 * where the host wrote "too much" data to us.
 */
static void out_flush(struct net2280_ep *ep)
{
	u32	__iomem *statp;
	u32	tmp;

	ASSERT_OUT_NAKING(ep);

	statp = &ep->regs->ep_stat;
	writel(BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
		BIT(DATA_PACKET_RECEIVED_INTERRUPT),
		statp);
	writel(BIT(FIFO_FLUSH), statp);
	/* Make sure that ep_stat is written */
	mb();
	tmp = readl(statp);
	if (tmp & BIT(DATA_OUT_PING_TOKEN_INTERRUPT) &&
			/* high speed did bulk NYET; fifo isn't filling */
			ep->dev->gadget.speed == USB_SPEED_FULL) {
		unsigned	usec;

		usec = 50;		/* 64 byte bulk/interrupt */
		handshake(statp, BIT(USB_OUT_PING_NAK_SENT),
				BIT(USB_OUT_PING_NAK_SENT), usec);
		/* NAK done; now CLEAR_NAK_OUT_PACKETS is safe */
	}
}

/* unload packet(s) from the fifo we use for usb OUT transfers.
 * returns true iff the request completed, because of short packet
 * or the request buffer having filled with full packets.
 *
 * for ep-a..ep-d this will read multiple packets out when they
 * have been accepted.
 */
static int read_fifo(struct net2280_ep *ep, struct net2280_request *req)
{
	struct net2280_ep_regs	__iomem *regs = ep->regs;
	u8			*buf = req->req.buf + req->req.actual;
	unsigned		count, tmp, is_short;
	unsigned		cleanup = 0, prevent = 0;

	/* erratum 0106 ... packets coming in during fifo reads might
	 * be incompletely rejected.  not all cases have workarounds.
	 */
	if (ep->dev->chiprev == 0x0100 &&
			ep->dev->gadget.speed == USB_SPEED_FULL) {
		udelay(1);
		tmp = readl(&ep->regs->ep_stat);
		if ((tmp & BIT(NAK_OUT_PACKETS)))
			cleanup = 1;
		else if ((tmp & BIT(FIFO_FULL))) {
			start_out_naking(ep);
			prevent = 1;
		}
		/* else: hope we don't see the problem */
	}

	/* never overflow the rx buffer. the fifo reads packets until
	 * it sees a short one; we might not be ready for them all.
	 */
	prefetchw(buf);
	count = readl(&regs->ep_avail);
	if (unlikely(count == 0)) {
		udelay(1);
		tmp = readl(&ep->regs->ep_stat);
		count = readl(&regs->ep_avail);
		/* handled that data already? */
		if (count == 0 && (tmp & BIT(NAK_OUT_PACKETS)) == 0)
			return 0;
	}

	tmp = req->req.length - req->req.actual;
	if (count > tmp) {
		/* as with DMA, data overflow gets flushed */
		if ((tmp % ep->ep.maxpacket) != 0) {
			ep_err(ep->dev,
				"%s out fifo %d bytes, expected %d\n",
				ep->ep.name, count, tmp);
			req->req.status = -EOVERFLOW;
			cleanup = 1;
			/* NAK_OUT_PACKETS will be set, so flushing is safe;
			 * the next read will start with the next packet
			 */
		} /* else it's a ZLP, no worries */
		count = tmp;
	}
	req->req.actual += count;

	is_short = (count == 0) || ((count % ep->ep.maxpacket) != 0);

	ep_vdbg(ep->dev, "read %s fifo (OUT) %d bytes%s%s%s req %p %d/%d\n",
			ep->ep.name, count, is_short ? " (short)" : "",
			cleanup ? " flush" : "", prevent ? " nak" : "",
			req, req->req.actual, req->req.length);

	while (count >= 4) {
		tmp = readl(&regs->ep_data);
		cpu_to_le32s(&tmp);
		put_unaligned(tmp, (u32 *)buf);
		buf += 4;
		count -= 4;
	}
	if (count) {
		tmp = readl(&regs->ep_data);
		/* LE conversion is implicit here: */
		do {
			*buf++ = (u8) tmp;
			tmp >>= 8;
		} while (--count);
	}
	if (cleanup)
		out_flush(ep);
	if (prevent) {
		writel(BIT(CLEAR_NAK_OUT_PACKETS), &ep->regs->ep_rsp);
		(void) readl(&ep->regs->ep_rsp);
	}

	return is_short || ((req->req.actual == req->req.length) &&
			!req->req.zero);
}

RRD
730static void fill_dma_desc(struct net2280_ep *ep,
731 struct net2280_request *req, int valid)
1da177e4
LT
732{
733 struct net2280_dma *td = req->td;
734 u32 dmacount = req->req.length;
735
736 /* don't let DMA continue after a short OUT packet,
737 * so overruns can't affect the next transfer.
738 * in case of overruns on max-size packets, we can't
739 * stop the fifo from filling but we can flush it.
740 */
741 if (ep->is_in)
3e76fdcb 742 dmacount |= BIT(DMA_DIRECTION);
ae8e530a 743 if ((!ep->is_in && (dmacount % ep->ep.maxpacket) != 0) ||
2eeb0016 744 !(ep->dev->quirks & PLX_2280))
3e76fdcb 745 dmacount |= BIT(END_OF_CHAIN);
1da177e4
LT
746
747 req->valid = valid;
748 if (valid)
3e76fdcb 749 dmacount |= BIT(VALID_BIT);
1da177e4 750 if (likely(!req->req.no_interrupt || !use_dma_chaining))
3e76fdcb 751 dmacount |= BIT(DMA_DONE_INTERRUPT_ENABLE);
1da177e4
LT
752
753 /* td->dmadesc = previously set by caller */
754 td->dmaaddr = cpu_to_le32 (req->req.dma);
755
756 /* 2280 may be polling VALID_BIT through ep->dma->dmadesc */
fae3c158 757 wmb();
da2bbdcc 758 td->dmacount = cpu_to_le32(dmacount);
1da177e4
LT
759}
760
761static const u32 dmactl_default =
3e76fdcb
RRD
762 BIT(DMA_SCATTER_GATHER_DONE_INTERRUPT) |
763 BIT(DMA_CLEAR_COUNT_ENABLE) |
1da177e4 764 /* erratum 0116 workaround part 1 (use POLLING) */
3e76fdcb
RRD
765 (POLL_100_USEC << DESCRIPTOR_POLLING_RATE) |
766 BIT(DMA_VALID_BIT_POLLING_ENABLE) |
767 BIT(DMA_VALID_BIT_ENABLE) |
768 BIT(DMA_SCATTER_GATHER_ENABLE) |
1da177e4 769 /* erratum 0116 workaround part 2 (no AUTOSTART) */
3e76fdcb 770 BIT(DMA_ENABLE);
1da177e4 771
fae3c158 772static inline void spin_stop_dma(struct net2280_dma_regs __iomem *dma)
1da177e4 773{
3e76fdcb 774 handshake(&dma->dmactl, BIT(DMA_ENABLE), 0, 50);
1da177e4
LT
775}
776
fae3c158 777static inline void stop_dma(struct net2280_dma_regs __iomem *dma)
1da177e4 778{
3e76fdcb 779 writel(readl(&dma->dmactl) & ~BIT(DMA_ENABLE), &dma->dmactl);
fae3c158 780 spin_stop_dma(dma);
1da177e4
LT
781}
782
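/* prime the DMA engine with the descriptor chain starting at @td_dma
 * and kick it off; the read of pcimstctl before DMA_START is part of
 * the erratum 0116 workaround noted below
 */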
static void start_queue(struct net2280_ep *ep, u32 dmactl, u32 td_dma)
{
	struct net2280_dma_regs	__iomem *dma = ep->dma;
	unsigned int tmp = BIT(VALID_BIT) | (ep->is_in << DMA_DIRECTION);

	if (!(ep->dev->quirks & PLX_2280))
		tmp |= BIT(END_OF_CHAIN);

	writel(tmp, &dma->dmacount);
	writel(readl(&dma->dmastat), &dma->dmastat);

	writel(td_dma, &dma->dmadesc);
	if (ep->dev->quirks & PLX_SUPERSPEED)
		dmactl |= BIT(DMA_REQUEST_OUTSTANDING);
	writel(dmactl, &dma->dmactl);

	/* erratum 0116 workaround part 3:  pci arbiter away from net2280 */
	(void) readl(&ep->dev->pci->pcimstctl);

	writel(BIT(DMA_START), &dma->dmastat);

	if (!ep->is_in)
		stop_out_naking(ep);
}

static void start_dma(struct net2280_ep *ep, struct net2280_request *req)
{
	u32	tmp;
	struct net2280_dma_regs	__iomem *dma = ep->dma;

	/* FIXME can't use DMA for ZLPs */

	/* on this path we "know" there's no dma active (yet) */
	WARN_ON(readl(&dma->dmactl) & BIT(DMA_ENABLE));
	writel(0, &ep->dma->dmactl);

	/* previous OUT packet might have been short */
	if (!ep->is_in && (readl(&ep->regs->ep_stat) &
				BIT(NAK_OUT_PACKETS))) {
		writel(BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT),
			&ep->regs->ep_stat);

		tmp = readl(&ep->regs->ep_avail);
		if (tmp) {
			writel(readl(&dma->dmastat), &dma->dmastat);

			/* transfer all/some fifo data */
			writel(req->req.dma, &dma->dmaaddr);
			tmp = min(tmp, req->req.length);

			/* dma irq, faking scatterlist status */
			req->td->dmacount = cpu_to_le32(req->req.length - tmp);
			writel(BIT(DMA_DONE_INTERRUPT_ENABLE) | tmp,
					&dma->dmacount);
			req->td->dmadesc = 0;
			req->valid = 1;

			writel(BIT(DMA_ENABLE), &dma->dmactl);
			writel(BIT(DMA_START), &dma->dmastat);
			return;
		}
	}

	tmp = dmactl_default;

	/* force packet boundaries between dma requests, but prevent the
	 * controller from automagically writing a last "short" packet
	 * (zero length) unless the driver explicitly said to do that.
	 */
	if (ep->is_in) {
		if (likely((req->req.length % ep->ep.maxpacket) ||
							req->req.zero)) {
			tmp |= BIT(DMA_FIFO_VALIDATE);
			ep->in_fifo_validate = 1;
		} else
			ep->in_fifo_validate = 0;
	}

	/* init req->td, pointing to the current dummy */
	req->td->dmadesc = cpu_to_le32 (ep->td_dma);
	fill_dma_desc(ep, req, 1);

	if (!use_dma_chaining)
		req->td->dmacount |= cpu_to_le32(BIT(END_OF_CHAIN));

	start_queue(ep, tmp, req->td_dma);
}

static inline void resume_dma(struct net2280_ep *ep)
{
	writel(readl(&ep->dma->dmactl) | BIT(DMA_ENABLE), &ep->dma->dmactl);

	ep->dma_started = true;
}

static inline void ep_stop_dma(struct net2280_ep *ep)
{
	writel(readl(&ep->dma->dmactl) & ~BIT(DMA_ENABLE), &ep->dma->dmactl);
	spin_stop_dma(ep->dma);

	ep->dma_started = false;
}

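/* add a request to an already-active DMA chain: swap the endpoint's
 * spare "dummy" descriptor with the request's, link it at the tail,
 * then fill it in (optionally pre-marked valid)
 */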
static inline void
queue_dma(struct net2280_ep *ep, struct net2280_request *req, int valid)
{
	struct net2280_dma	*end;
	dma_addr_t		tmp;

	/* swap new dummy for old, link; fill and maybe activate */
	end = ep->dummy;
	ep->dummy = req->td;
	req->td = end;

	tmp = ep->td_dma;
	ep->td_dma = req->td_dma;
	req->td_dma = tmp;

	end->dmadesc = cpu_to_le32 (ep->td_dma);

	fill_dma_desc(ep, req, valid);
}

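/* retire one request: unlink it, undo any gadget-core DMA mapping, and
 * hand it back to the gadget driver; dev->lock is dropped around the
 * completion callback so that callback may queue further requests
 */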
static void
done(struct net2280_ep *ep, struct net2280_request *req, int status)
{
	struct net2280		*dev;
	unsigned		stopped = ep->stopped;

	list_del_init(&req->queue);

	if (req->req.status == -EINPROGRESS)
		req->req.status = status;
	else
		status = req->req.status;

	dev = ep->dev;
	if (ep->dma)
		usb_gadget_unmap_request(&dev->gadget, &req->req, ep->is_in);

	if (status && status != -ESHUTDOWN)
		ep_vdbg(dev, "complete %s req %p stat %d len %u/%u\n",
			ep->ep.name, &req->req, status,
			req->req.actual, req->req.length);

	/* don't modify queue heads during completion callback */
	ep->stopped = 1;
	spin_unlock(&dev->lock);
	usb_gadget_giveback_request(&ep->ep, &req->req);
	spin_lock(&dev->lock);
	ep->stopped = stopped;
}

/*-------------------------------------------------------------------------*/

static int
net2280_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags)
{
	struct net2280_request	*req;
	struct net2280_ep	*ep;
	struct net2280		*dev;
	unsigned long		flags;

	/* we always require a cpu-view buffer, so that we can
	 * always use pio (as fallback or whatever).
	 */
	req = container_of(_req, struct net2280_request, req);
	if (!_req || !_req->complete || !_req->buf ||
				!list_empty(&req->queue))
		return -EINVAL;
	if (_req->length > (~0 & DMA_BYTE_COUNT_MASK))
		return -EDOM;
	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -EINVAL;
	dev = ep->dev;
	if (!dev->driver || dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	/* FIXME implement PIO fallback for ZLPs with DMA */
	if (ep->dma && _req->length == 0)
		return -EOPNOTSUPP;

	/* set up dma mapping in case the caller didn't */
	if (ep->dma) {
		int ret;

		ret = usb_gadget_map_request(&dev->gadget, _req,
				ep->is_in);
		if (ret)
			return ret;
	}

#if 0
	ep_vdbg(dev, "%s queue req %p, len %d buf %p\n",
			_ep->name, _req, _req->length, _req->buf);
#endif

	spin_lock_irqsave(&dev->lock, flags);

	_req->status = -EINPROGRESS;
	_req->actual = 0;

	/* kickstart this i/o queue? */
	if (list_empty(&ep->queue) && !ep->stopped) {
		/* DMA request while EP halted */
		if (ep->dma &&
		    (readl(&ep->regs->ep_rsp) & BIT(CLEAR_ENDPOINT_HALT)) &&
		    (dev->quirks & PLX_SUPERSPEED)) {
			int valid = 1;
			if (ep->is_in) {
				int expect;
				expect = likely(req->req.zero ||
						((req->req.length %
						ep->ep.maxpacket) != 0));
				if (expect != ep->in_fifo_validate)
					valid = 0;
			}
			queue_dma(ep, req, valid);
		}
		/* use DMA if the endpoint supports it, else pio */
		else if (ep->dma)
			start_dma(ep, req);
		else {
			/* maybe there's no control data, just status ack */
			if (ep->num == 0 && _req->length == 0) {
				allow_status(ep);
				done(ep, req, 0);
				ep_vdbg(dev, "%s status ack\n", ep->ep.name);
				goto done;
			}

			/* PIO ... stuff the fifo, or unblock it. */
			if (ep->is_in)
				write_fifo(ep, _req);
			else if (list_empty(&ep->queue)) {
				u32	s;

				/* OUT FIFO might have packet(s) buffered */
				s = readl(&ep->regs->ep_stat);
				if ((s & BIT(FIFO_EMPTY)) == 0) {
					/* note:  _req->short_not_ok is
					 * ignored here since PIO _always_
					 * stops queue advance here, and
					 * _req->status doesn't change for
					 * short reads (only _req->actual)
					 */
					if (read_fifo(ep, req) &&
							ep->num == 0) {
						done(ep, req, 0);
						allow_status(ep);
						/* don't queue it */
						req = NULL;
					} else if (read_fifo(ep, req) &&
							ep->num != 0) {
						done(ep, req, 0);
						req = NULL;
					} else
						s = readl(&ep->regs->ep_stat);
				}

				/* don't NAK, let the fifo fill */
				if (req && (s & BIT(NAK_OUT_PACKETS)))
					writel(BIT(CLEAR_NAK_OUT_PACKETS),
							&ep->regs->ep_rsp);
			}
		}

	} else if (ep->dma) {
		int	valid = 1;

		if (ep->is_in) {
			int	expect;

			/* preventing magic zlps is per-engine state, not
			 * per-transfer; irq logic must recover hiccups.
			 */
			expect = likely(req->req.zero ||
				(req->req.length % ep->ep.maxpacket));
			if (expect != ep->in_fifo_validate)
				valid = 0;
		}
		queue_dma(ep, req, valid);

	} /* else the irq handler advances the queue. */

	ep->responded = 1;
	if (req)
		list_add_tail(&req->queue, &ep->queue);
done:
	spin_unlock_irqrestore(&dev->lock, flags);

	/* pci writes may still be posted */
	return 0;
}

static inline void
dma_done(struct net2280_ep *ep, struct net2280_request *req, u32 dmacount,
		int status)
{
	req->req.actual = req->req.length - (DMA_BYTE_COUNT_MASK & dmacount);
	done(ep, req, status);
}

static void restart_dma(struct net2280_ep *ep);

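/* harvest descriptors the hardware has retired (VALID_BIT cleared) and
 * complete their requests; stops at the first descriptor still owned by
 * the DMA engine
 */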
static void scan_dma_completions(struct net2280_ep *ep)
{
	/* only look at descriptors that were "naturally" retired,
	 * so fifo and list head state won't matter
	 */
	while (!list_empty(&ep->queue)) {
		struct net2280_request	*req;
		u32			tmp;

		req = list_entry(ep->queue.next,
				struct net2280_request, queue);
		if (!req->valid)
			break;
		rmb();
		tmp = le32_to_cpup(&req->td->dmacount);
		if ((tmp & BIT(VALID_BIT)) != 0)
			break;

		/* SHORT_PACKET_TRANSFERRED_INTERRUPT handles "usb-short"
		 * cases where DMA must be aborted; this code handles
		 * all non-abort DMA completions.
		 */
		if (unlikely(req->td->dmadesc == 0)) {
			/* paranoia */
			tmp = readl(&ep->dma->dmacount);
			if (tmp & DMA_BYTE_COUNT_MASK)
				break;
			/* single transfer mode */
			dma_done(ep, req, tmp, 0);
			break;
		} else if (!ep->is_in &&
				(req->req.length % ep->ep.maxpacket) != 0) {
			tmp = readl(&ep->regs->ep_stat);
			if (ep->dev->quirks & PLX_SUPERSPEED)
				return dma_done(ep, req, tmp, 0);

			/* AVOID TROUBLE HERE by not issuing short reads from
			 * your gadget driver.  That helps avoid errata 0121,
			 * 0122, and 0124; not all cases trigger the warning.
			 */
			if ((tmp & BIT(NAK_OUT_PACKETS)) == 0) {
				ep_warn(ep->dev, "%s lost packet sync!\n",
						ep->ep.name);
				req->req.status = -EOVERFLOW;
			} else {
				tmp = readl(&ep->regs->ep_avail);
				if (tmp) {
					/* fifo gets flushed later */
					ep->out_overflow = 1;
					ep_dbg(ep->dev,
						"%s dma, discard %d len %d\n",
						ep->ep.name, tmp,
						req->req.length);
					req->req.status = -EOVERFLOW;
				}
			}
		}
		dma_done(ep, req, tmp, 0);
	}
}

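/* (re)start DMA on the request at the head of the queue; with dma
 * chaining enabled this also patches descriptor VALID bits so the
 * engine resumes cleanly after a zlp/short-packet "hiccup"
 */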
static void restart_dma(struct net2280_ep *ep)
{
	struct net2280_request	*req;
	u32			dmactl = dmactl_default;

	if (ep->stopped)
		return;
	req = list_entry(ep->queue.next, struct net2280_request, queue);

	if (!use_dma_chaining) {
		start_dma(ep, req);
		return;
	}

	/* the 2280 will be processing the queue unless queue hiccups after
	 * the previous transfer:
	 *  IN:   wanted automagic zlp, head doesn't (or vice versa)
	 *        DMA_FIFO_VALIDATE doesn't init from dma descriptors.
	 *  OUT:  was "usb-short", we must restart.
	 */
	if (ep->is_in && !req->valid) {
		struct net2280_request	*entry, *prev = NULL;
		int			reqmode, done = 0;

		ep_dbg(ep->dev, "%s dma hiccup td %p\n", ep->ep.name, req->td);
		ep->in_fifo_validate = likely(req->req.zero ||
				(req->req.length % ep->ep.maxpacket) != 0);
		if (ep->in_fifo_validate)
			dmactl |= BIT(DMA_FIFO_VALIDATE);
		list_for_each_entry(entry, &ep->queue, queue) {
			__le32		dmacount;

			if (entry == req)
				continue;
			dmacount = entry->td->dmacount;
			if (!done) {
				reqmode = likely(entry->req.zero ||
					(entry->req.length % ep->ep.maxpacket));
				if (reqmode == ep->in_fifo_validate) {
					entry->valid = 1;
					dmacount |= valid_bit;
					entry->td->dmacount = dmacount;
					prev = entry;
					continue;
				} else {
					/* force a hiccup */
					prev->td->dmacount |= dma_done_ie;
					done = 1;
				}
			}

			/* walk the rest of the queue so unlinks behave */
			entry->valid = 0;
			dmacount &= ~valid_bit;
			entry->td->dmacount = dmacount;
			prev = entry;
		}
	}

	writel(0, &ep->dma->dmactl);
	start_queue(ep, dmactl, req->td_dma);
}

static void abort_dma_228x(struct net2280_ep *ep)
{
	/* abort the current transfer */
	if (likely(!list_empty(&ep->queue))) {
		/* FIXME work around errata 0121, 0122, 0124 */
		writel(BIT(DMA_ABORT), &ep->dma->dmastat);
		spin_stop_dma(ep->dma);
	} else
		stop_dma(ep->dma);
	scan_dma_completions(ep);
}

static void abort_dma_338x(struct net2280_ep *ep)
{
	writel(BIT(DMA_ABORT), &ep->dma->dmastat);
	spin_stop_dma(ep->dma);
}

static void abort_dma(struct net2280_ep *ep)
{
	if (ep->dev->quirks & PLX_LEGACY)
		return abort_dma_228x(ep);
	return abort_dma_338x(ep);
}

/* dequeue ALL requests */
static void nuke(struct net2280_ep *ep)
{
	struct net2280_request	*req;

	/* called with spinlock held */
	ep->stopped = 1;
	if (ep->dma)
		abort_dma(ep);
	while (!list_empty(&ep->queue)) {
		req = list_entry(ep->queue.next,
				struct net2280_request,
				queue);
		done(ep, req, -ESHUTDOWN);
	}
}

/* dequeue JUST ONE request */
static int net2280_dequeue(struct usb_ep *_ep, struct usb_request *_req)
{
	struct net2280_ep	*ep;
	struct net2280_request	*req;
	unsigned long		flags;
	u32			dmactl;
	int			stopped;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0) || !_req)
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, flags);
	stopped = ep->stopped;

	/* quiesce dma while we patch the queue */
	dmactl = 0;
	ep->stopped = 1;
	if (ep->dma) {
		dmactl = readl(&ep->dma->dmactl);
		/* WARNING erratum 0127 may kick in ... */
		stop_dma(ep->dma);
		scan_dma_completions(ep);
	}

	/* make sure it's still queued on this endpoint */
	list_for_each_entry(req, &ep->queue, queue) {
		if (&req->req == _req)
			break;
	}
	if (&req->req != _req) {
		spin_unlock_irqrestore(&ep->dev->lock, flags);
		return -EINVAL;
	}

	/* queue head may be partially complete. */
	if (ep->queue.next == &req->queue) {
		if (ep->dma) {
			ep_dbg(ep->dev, "unlink (%s) dma\n", _ep->name);
			_req->status = -ECONNRESET;
			abort_dma(ep);
			if (likely(ep->queue.next == &req->queue)) {
				/* NOTE: misreports single-transfer mode */
				req->td->dmacount = 0;	/* invalidate */
				dma_done(ep, req,
					readl(&ep->dma->dmacount),
					-ECONNRESET);
			}
		} else {
			ep_dbg(ep->dev, "unlink (%s) pio\n", _ep->name);
			done(ep, req, -ECONNRESET);
		}
		req = NULL;

	/* patch up hardware chaining data */
	} else if (ep->dma && use_dma_chaining) {
		if (req->queue.prev == ep->queue.next) {
			writel(le32_to_cpu(req->td->dmadesc),
				&ep->dma->dmadesc);
			if (req->td->dmacount & dma_done_ie)
				writel(readl(&ep->dma->dmacount) |
					le32_to_cpu(dma_done_ie),
					&ep->dma->dmacount);
		} else {
			struct net2280_request	*prev;

			prev = list_entry(req->queue.prev,
				struct net2280_request, queue);
			prev->td->dmadesc = req->td->dmadesc;
			if (req->td->dmacount & dma_done_ie)
				prev->td->dmacount |= dma_done_ie;
		}
	}

	if (req)
		done(ep, req, -ECONNRESET);
	ep->stopped = stopped;

	if (ep->dma) {
		/* turn off dma on inactive queues */
		if (list_empty(&ep->queue))
			stop_dma(ep->dma);
		else if (!ep->stopped) {
			/* resume current request, or start new one */
			if (req)
				writel(dmactl, &ep->dma->dmactl);
			else
				start_dma(ep, list_entry(ep->queue.next,
					struct net2280_request, queue));
		}
	}

	spin_unlock_irqrestore(&ep->dev->lock, flags);
	return 0;
}

/*-------------------------------------------------------------------------*/

static int net2280_fifo_status(struct usb_ep *_ep);

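/* common implementation behind usb_ep_set_halt() and usb_ep_set_wedge():
 * refuses to halt while requests are queued or an IN fifo still holds
 * data, and on 338x parts restarts DMA after a clear-halt
 */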
static int
net2280_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedged)
{
	struct net2280_ep	*ep;
	unsigned long		flags;
	int			retval = 0;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -EINVAL;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;
	if (ep->desc /* not ep0 */ && (ep->desc->bmAttributes & 0x03)
						== USB_ENDPOINT_XFER_ISOC)
		return -EINVAL;

	spin_lock_irqsave(&ep->dev->lock, flags);
	if (!list_empty(&ep->queue))
		retval = -EAGAIN;
	else if (ep->is_in && value && net2280_fifo_status(_ep) != 0)
		retval = -EAGAIN;
	else {
		ep_vdbg(ep->dev, "%s %s %s\n", _ep->name,
				value ? "set" : "clear",
				wedged ? "wedge" : "halt");
		/* set/clear, then synch memory views with the device */
		if (value) {
			if (ep->num == 0)
				ep->dev->protocol_stall = 1;
			else
				set_halt(ep);
			if (wedged)
				ep->wedged = 1;
		} else {
			clear_halt(ep);
			if (ep->dev->quirks & PLX_SUPERSPEED &&
				!list_empty(&ep->queue) && ep->td_dma)
					restart_dma(ep);
			ep->wedged = 0;
		}
		(void) readl(&ep->regs->ep_rsp);
	}
	spin_unlock_irqrestore(&ep->dev->lock, flags);

	return retval;
}

static int net2280_set_halt(struct usb_ep *_ep, int value)
{
	return net2280_set_halt_and_wedge(_ep, value, 0);
}

static int net2280_set_wedge(struct usb_ep *_ep)
{
	if (!_ep || _ep->name == ep0name)
		return -EINVAL;
	return net2280_set_halt_and_wedge(_ep, 1, 1);
}

static int net2280_fifo_status(struct usb_ep *_ep)
{
	struct net2280_ep	*ep;
	u32			avail;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return -ENODEV;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return -ESHUTDOWN;

	avail = readl(&ep->regs->ep_avail) & (BIT(12) - 1);
	if (avail > ep->fifo_size)
		return -EOVERFLOW;
	if (ep->is_in)
		avail = ep->fifo_size - avail;
	return avail;
}

static void net2280_fifo_flush(struct usb_ep *_ep)
{
	struct net2280_ep	*ep;

	ep = container_of(_ep, struct net2280_ep, ep);
	if (!_ep || (!ep->desc && ep->num != 0))
		return;
	if (!ep->dev->driver || ep->dev->gadget.speed == USB_SPEED_UNKNOWN)
		return;

	writel(BIT(FIFO_FLUSH), &ep->regs->ep_stat);
	(void) readl(&ep->regs->ep_rsp);
}

static const struct usb_ep_ops net2280_ep_ops = {
	.enable		= net2280_enable,
	.disable	= net2280_disable,

	.alloc_request	= net2280_alloc_request,
	.free_request	= net2280_free_request,

	.queue		= net2280_queue,
	.dequeue	= net2280_dequeue,

	.set_halt	= net2280_set_halt,
	.set_wedge	= net2280_set_wedge,
	.fifo_status	= net2280_fifo_status,
	.fifo_flush	= net2280_fifo_flush,
};

/*-------------------------------------------------------------------------*/

static int net2280_get_frame(struct usb_gadget *_gadget)
{
	struct net2280		*dev;
	unsigned long		flags;
	u16			retval;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct net2280, gadget);
	spin_lock_irqsave(&dev->lock, flags);
	retval = get_idx_reg(dev->regs, REG_FRAME) & 0x03ff;
	spin_unlock_irqrestore(&dev->lock, flags);
	return retval;
}

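/* remote wakeup: signal resume to the host, but only if the host has
 * previously enabled DEVICE_REMOTE_WAKEUP for this device
 */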
static int net2280_wakeup(struct usb_gadget *_gadget)
{
	struct net2280		*dev;
	u32			tmp;
	unsigned long		flags;

	if (!_gadget)
		return 0;
	dev = container_of(_gadget, struct net2280, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	tmp = readl(&dev->usb->usbctl);
	if (tmp & BIT(DEVICE_REMOTE_WAKEUP_ENABLE))
		writel(BIT(GENERATE_RESUME), &dev->usb->usbstat);
	spin_unlock_irqrestore(&dev->lock, flags);

	/* pci writes may still be posted */
	return 0;
}

static int net2280_set_selfpowered(struct usb_gadget *_gadget, int value)
{
	struct net2280		*dev;
	u32			tmp;
	unsigned long		flags;

	if (!_gadget)
		return 0;
	dev = container_of(_gadget, struct net2280, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	tmp = readl(&dev->usb->usbctl);
	if (value) {
		tmp |= BIT(SELF_POWERED_STATUS);
		dev->selfpowered = 1;
	} else {
		tmp &= ~BIT(SELF_POWERED_STATUS);
		dev->selfpowered = 0;
	}
	writel(tmp, &dev->usb->usbctl);
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

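/* soft-connect control: the gadget core uses this to attach to or
 * detach from the host by toggling USB_DETECT_ENABLE in usbctl
 */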
static int net2280_pullup(struct usb_gadget *_gadget, int is_on)
{
	struct net2280	*dev;
	u32		tmp;
	unsigned long	flags;

	if (!_gadget)
		return -ENODEV;
	dev = container_of(_gadget, struct net2280, gadget);

	spin_lock_irqsave(&dev->lock, flags);
	tmp = readl(&dev->usb->usbctl);
	dev->softconnect = (is_on != 0);
	if (is_on)
		tmp |= BIT(USB_DETECT_ENABLE);
	else
		tmp &= ~BIT(USB_DETECT_ENABLE);
	writel(tmp, &dev->usb->usbctl);
	spin_unlock_irqrestore(&dev->lock, flags);

	return 0;
}

static int net2280_start(struct usb_gadget *_gadget,
		struct usb_gadget_driver *driver);
static int net2280_stop(struct usb_gadget *_gadget,
		struct usb_gadget_driver *driver);

static const struct usb_gadget_ops net2280_ops = {
	.get_frame	= net2280_get_frame,
	.wakeup		= net2280_wakeup,
	.set_selfpowered = net2280_set_selfpowered,
	.pullup		= net2280_pullup,
	.udc_start	= net2280_start,
	.udc_stop	= net2280_stop,
};

/*-------------------------------------------------------------------------*/

#ifdef	CONFIG_USB_GADGET_DEBUG_FILES

/* FIXME move these into procfs, and use seq_file.
 * Sysfs _still_ doesn't behave for arbitrarily sized files,
 * and also doesn't help products using this with 2.4 kernels.
 */

/* "function" sysfs attribute */
static ssize_t function_show(struct device *_dev, struct device_attribute *attr,
			     char *buf)
{
	struct net2280	*dev = dev_get_drvdata(_dev);

	if (!dev->driver || !dev->driver->function ||
			strlen(dev->driver->function) > PAGE_SIZE)
		return 0;
	return scnprintf(buf, PAGE_SIZE, "%s\n", dev->driver->function);
}
static DEVICE_ATTR_RO(function);

1585static ssize_t registers_show(struct device *_dev,
1586 struct device_attribute *attr, char *buf)
1da177e4
LT
1587{
1588 struct net2280 *dev;
1589 char *next;
1590 unsigned size, t;
1591 unsigned long flags;
1592 int i;
1593 u32 t1, t2;
30e69598 1594 const char *s;
1da177e4 1595
fae3c158 1596 dev = dev_get_drvdata(_dev);
1da177e4
LT
1597 next = buf;
1598 size = PAGE_SIZE;
fae3c158 1599 spin_lock_irqsave(&dev->lock, flags);
1da177e4
LT
1600
1601 if (dev->driver)
1602 s = dev->driver->driver.name;
1603 else
1604 s = "(none)";
1605
1606 /* Main Control Registers */
fae3c158 1607 t = scnprintf(next, size, "%s version " DRIVER_VERSION
1da177e4
LT
1608 ", chiprev %04x, dma %s\n\n"
1609 "devinit %03x fifoctl %08x gadget '%s'\n"
1610 "pci irqenb0 %02x irqenb1 %08x "
1611 "irqstat0 %04x irqstat1 %08x\n",
1612 driver_name, dev->chiprev,
1613 use_dma
1614 ? (use_dma_chaining ? "chaining" : "enabled")
1615 : "disabled",
fae3c158
RRD
1616 readl(&dev->regs->devinit),
1617 readl(&dev->regs->fifoctl),
1da177e4 1618 s,
fae3c158
RRD
1619 readl(&dev->regs->pciirqenb0),
1620 readl(&dev->regs->pciirqenb1),
1621 readl(&dev->regs->irqstat0),
1622 readl(&dev->regs->irqstat1));
1da177e4
LT
1623 size -= t;
1624 next += t;
1625
1626 /* USB Control Registers */
fae3c158
RRD
1627 t1 = readl(&dev->usb->usbctl);
1628 t2 = readl(&dev->usb->usbstat);
3e76fdcb
RRD
1629 if (t1 & BIT(VBUS_PIN)) {
1630 if (t2 & BIT(HIGH_SPEED))
1da177e4
LT
1631 s = "high speed";
1632 else if (dev->gadget.speed == USB_SPEED_UNKNOWN)
1633 s = "powered";
1634 else
1635 s = "full speed";
1636 /* full speed bit (6) not working?? */
1637 } else
1638 s = "not attached";
fae3c158 1639 t = scnprintf(next, size,
1da177e4
LT
1640 "stdrsp %08x usbctl %08x usbstat %08x "
1641 "addr 0x%02x (%s)\n",
fae3c158
RRD
1642 readl(&dev->usb->stdrsp), t1, t2,
1643 readl(&dev->usb->ouraddr), s);
1da177e4
LT
1644 size -= t;
1645 next += t;
1646
1647 /* PCI Master Control Registers */
1648
1649 /* DMA Control Registers */
1650
1651 /* Configurable EP Control Registers */
adc82f77 1652 for (i = 0; i < dev->n_ep; i++) {
1da177e4
LT
1653 struct net2280_ep *ep;
1654
fae3c158 1655 ep = &dev->ep[i];
1da177e4
LT
1656 if (i && !ep->desc)
1657 continue;
1658
adc82f77 1659 t1 = readl(&ep->cfg->ep_cfg);
fae3c158
RRD
1660 t2 = readl(&ep->regs->ep_rsp) & 0xff;
1661 t = scnprintf(next, size,
1da177e4
LT
1662 "\n%s\tcfg %05x rsp (%02x) %s%s%s%s%s%s%s%s"
1663 "irqenb %02x\n",
1664 ep->ep.name, t1, t2,
3e76fdcb 1665 (t2 & BIT(CLEAR_NAK_OUT_PACKETS))
1da177e4 1666 ? "NAK " : "",
3e76fdcb 1667 (t2 & BIT(CLEAR_EP_HIDE_STATUS_PHASE))
1da177e4 1668 ? "hide " : "",
3e76fdcb 1669 (t2 & BIT(CLEAR_EP_FORCE_CRC_ERROR))
1da177e4 1670 ? "CRC " : "",
3e76fdcb 1671 (t2 & BIT(CLEAR_INTERRUPT_MODE))
1da177e4 1672 ? "interrupt " : "",
3e76fdcb 1673 (t2 & BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE))
1da177e4 1674 ? "status " : "",
3e76fdcb 1675 (t2 & BIT(CLEAR_NAK_OUT_PACKETS_MODE))
1da177e4 1676 ? "NAKmode " : "",
3e76fdcb 1677 (t2 & BIT(CLEAR_ENDPOINT_TOGGLE))
1da177e4 1678 ? "DATA1 " : "DATA0 ",
3e76fdcb 1679 (t2 & BIT(CLEAR_ENDPOINT_HALT))
1da177e4 1680 ? "HALT " : "",
fae3c158 1681 readl(&ep->regs->ep_irqenb));
1da177e4
LT
1682 size -= t;
1683 next += t;
1684
fae3c158 1685 t = scnprintf(next, size,
1da177e4
LT
1686 "\tstat %08x avail %04x "
1687 "(ep%d%s-%s)%s\n",
fae3c158
RRD
1688 readl(&ep->regs->ep_stat),
1689 readl(&ep->regs->ep_avail),
1690 t1 & 0x0f, DIR_STRING(t1),
1691 type_string(t1 >> 8),
1da177e4
LT
1692 ep->stopped ? "*" : "");
1693 size -= t;
1694 next += t;
1695
1696 if (!ep->dma)
1697 continue;
1698
fae3c158 1699 t = scnprintf(next, size,
1da177e4
LT
1700 " dma\tctl %08x stat %08x count %08x\n"
1701 "\taddr %08x desc %08x\n",
fae3c158
RRD
1702 readl(&ep->dma->dmactl),
1703 readl(&ep->dma->dmastat),
1704 readl(&ep->dma->dmacount),
1705 readl(&ep->dma->dmaaddr),
1706 readl(&ep->dma->dmadesc));
1da177e4
LT
1707 size -= t;
1708 next += t;
1709
1710 }
1711
fae3c158 1712 /* Indexed Registers (none yet) */
1da177e4
LT
1713
1714 /* Statistics */
fae3c158 1715 t = scnprintf(next, size, "\nirqs: ");
1da177e4
LT
1716 size -= t;
1717 next += t;
adc82f77 1718 for (i = 0; i < dev->n_ep; i++) {
1da177e4
LT
1719 struct net2280_ep *ep;
1720
fae3c158 1721 ep = &dev->ep[i];
1da177e4
LT
1722 if (i && !ep->irqs)
1723 continue;
fae3c158 1724 t = scnprintf(next, size, " %s/%lu", ep->ep.name, ep->irqs);
1da177e4
LT
1725 size -= t;
1726 next += t;
1727
1728 }
fae3c158 1729 t = scnprintf(next, size, "\n");
1da177e4
LT
1730 size -= t;
1731 next += t;
1732
fae3c158 1733 spin_unlock_irqrestore(&dev->lock, flags);
1da177e4
LT
1734
1735 return PAGE_SIZE - size;
1736}
ce26bd23 1737static DEVICE_ATTR_RO(registers);
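/*
 * Note on the accumulation pattern used above (general scnprintf()
 * behaviour, not anything specific to this driver): scnprintf() returns
 * the number of characters actually written, excluding the trailing NUL,
 * so repeating
 *
 *	t = scnprintf(next, size, ...);
 *	size -= t;
 *	next += t;
 *
 * can never overrun the PAGE_SIZE buffer, and "PAGE_SIZE - size" is the
 * length handed back to sysfs.
 */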
1da177e4 1738
ce26bd23
GKH
1739static ssize_t queues_show(struct device *_dev, struct device_attribute *attr,
1740 char *buf)
1da177e4
LT
1741{
1742 struct net2280 *dev;
1743 char *next;
1744 unsigned size;
1745 unsigned long flags;
1746 int i;
1747
fae3c158 1748 dev = dev_get_drvdata(_dev);
1da177e4
LT
1749 next = buf;
1750 size = PAGE_SIZE;
fae3c158 1751 spin_lock_irqsave(&dev->lock, flags);
1da177e4 1752
adc82f77 1753 for (i = 0; i < dev->n_ep; i++) {
fae3c158 1754 struct net2280_ep *ep = &dev->ep[i];
1da177e4
LT
1755 struct net2280_request *req;
1756 int t;
1757
1758 if (i != 0) {
1759 const struct usb_endpoint_descriptor *d;
1760
1761 d = ep->desc;
1762 if (!d)
1763 continue;
1764 t = d->bEndpointAddress;
fae3c158 1765 t = scnprintf(next, size,
1da177e4
LT
1766 "\n%s (ep%d%s-%s) max %04x %s fifo %d\n",
1767 ep->ep.name, t & USB_ENDPOINT_NUMBER_MASK,
1768 (t & USB_DIR_IN) ? "in" : "out",
a27f37a1 1769 type_string(d->bmAttributes),
fae3c158 1770 usb_endpoint_maxp(d) & 0x1fff,
1da177e4
LT
1771 ep->dma ? "dma" : "pio", ep->fifo_size
1772 );
1773 } else /* ep0 should only have one transfer queued */
fae3c158 1774 t = scnprintf(next, size, "ep0 max 64 pio %s\n",
1da177e4
LT
1775 ep->is_in ? "in" : "out");
1776 if (t <= 0 || t > size)
1777 goto done;
1778 size -= t;
1779 next += t;
1780
fae3c158
RRD
1781 if (list_empty(&ep->queue)) {
1782 t = scnprintf(next, size, "\t(nothing queued)\n");
1da177e4
LT
1783 if (t <= 0 || t > size)
1784 goto done;
1785 size -= t;
1786 next += t;
1787 continue;
1788 }
fae3c158
RRD
1789 list_for_each_entry(req, &ep->queue, queue) {
1790 if (ep->dma && req->td_dma == readl(&ep->dma->dmadesc))
1791 t = scnprintf(next, size,
1da177e4
LT
1792 "\treq %p len %d/%d "
1793 "buf %p (dmacount %08x)\n",
1794 &req->req, req->req.actual,
1795 req->req.length, req->req.buf,
fae3c158 1796 readl(&ep->dma->dmacount));
1da177e4 1797 else
fae3c158 1798 t = scnprintf(next, size,
1da177e4
LT
1799 "\treq %p len %d/%d buf %p\n",
1800 &req->req, req->req.actual,
1801 req->req.length, req->req.buf);
1802 if (t <= 0 || t > size)
1803 goto done;
1804 size -= t;
1805 next += t;
1806
1807 if (ep->dma) {
1808 struct net2280_dma *td;
1809
1810 td = req->td;
fae3c158 1811 t = scnprintf(next, size, "\t td %08x "
1da177e4
LT
1812 " count %08x buf %08x desc %08x\n",
1813 (u32) req->td_dma,
fae3c158
RRD
1814 le32_to_cpu(td->dmacount),
1815 le32_to_cpu(td->dmaaddr),
1816 le32_to_cpu(td->dmadesc));
1da177e4
LT
1817 if (t <= 0 || t > size)
1818 goto done;
1819 size -= t;
1820 next += t;
1821 }
1822 }
1823 }
1824
1825done:
fae3c158 1826 spin_unlock_irqrestore(&dev->lock, flags);
1da177e4
LT
1827 return PAGE_SIZE - size;
1828}
ce26bd23 1829static DEVICE_ATTR_RO(queues);
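/*
 * Illustrative usage from userspace (a sketch; the PCI address shown is
 * hypothetical and differs per system):
 *
 *	# cat /sys/bus/pci/devices/0000:01:00.0/registers
 *	# cat /sys/bus/pci/devices/0000:01:00.0/queues
 *
 * Each read returns at most one PAGE_SIZE snapshot, taken under dev->lock.
 */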
1da177e4
LT
1830
1831
1832#else
1833
fae3c158
RRD
1834#define device_create_file(a, b) (0)
1835#define device_remove_file(a, b) do { } while (0)
1da177e4
LT
1836
1837#endif
1838
1839/*-------------------------------------------------------------------------*/
1840
1841/* another driver-specific mode might be a request type doing dma
1842 * to/from another device fifo instead of to/from memory.
1843 */
1844
fae3c158 1845static void set_fifo_mode(struct net2280 *dev, int mode)
1da177e4
LT
1846{
1847 /* keeping high bits preserves BAR2 */
fae3c158 1848 writel((0xffff << PCI_BASE2_RANGE) | mode, &dev->regs->fifoctl);
1da177e4
LT
1849
1850 /* always ep-{a,b,e,f} ... maybe not ep-c or ep-d */
fae3c158
RRD
1851 INIT_LIST_HEAD(&dev->gadget.ep_list);
1852 list_add_tail(&dev->ep[1].ep.ep_list, &dev->gadget.ep_list);
1853 list_add_tail(&dev->ep[2].ep.ep_list, &dev->gadget.ep_list);
1da177e4
LT
1854 switch (mode) {
1855 case 0:
fae3c158
RRD
1856 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1857 list_add_tail(&dev->ep[4].ep.ep_list, &dev->gadget.ep_list);
1858 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 1024;
1da177e4
LT
1859 break;
1860 case 1:
fae3c158 1861 dev->ep[1].fifo_size = dev->ep[2].fifo_size = 2048;
1da177e4
LT
1862 break;
1863 case 2:
fae3c158
RRD
1864 list_add_tail(&dev->ep[3].ep.ep_list, &dev->gadget.ep_list);
1865 dev->ep[1].fifo_size = 2048;
1866 dev->ep[2].fifo_size = 1024;
1da177e4
LT
1867 break;
1868 }
1869 /* fifo sizes for ep0, ep-c, ep-d, ep-e, and ep-f never change */
fae3c158
RRD
1870 list_add_tail(&dev->ep[5].ep.ep_list, &dev->gadget.ep_list);
1871 list_add_tail(&dev->ep[6].ep.ep_list, &dev->gadget.ep_list);
1da177e4
LT
1872}
1873
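/*
 * Quick reference for set_fifo_mode() above (derived from the switch, not
 * from the datasheet):
 *
 *	fifo_mode 0: ep-a..ep-d listed; ep-a and ep-b get 1K FIFOs
 *	fifo_mode 1: only ep-a and ep-b listed, each with a 2K FIFO
 *	fifo_mode 2: ep-a (2K), ep-b (1K), plus ep-c listed
 *
 * ep-e and ep-f are always listed and keep their fixed 64-byte FIFOs.
 */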
adc82f77
RRD
1874static void defect7374_disable_data_eps(struct net2280 *dev)
1875{
1876 /*
1877 * For Defect 7374, disable data EPs (and more):
1878 * - This phase undoes the earlier phase of the Defect 7374 workaround,
1879 *   returning ep regs back to normal.
1880 */
1881 struct net2280_ep *ep;
1882 int i;
1883 unsigned char ep_sel;
1884 u32 tmp_reg;
1885
1886 for (i = 1; i < 5; i++) {
1887 ep = &dev->ep[i];
1888 writel(0, &ep->cfg->ep_cfg);
1889 }
1890
1891 /* CSROUT, CSRIN, PCIOUT, PCIIN, STATIN, RCIN */
1892 for (i = 0; i < 6; i++)
1893 writel(0, &dev->dep[i].dep_cfg);
1894
1895 for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
1896 /* Select an endpoint for subsequent operations: */
1897 tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
1898 writel(((tmp_reg & ~0x1f) | ep_sel), &dev->plregs->pl_ep_ctrl);
1899
1900 if (ep_sel < 2 || (ep_sel > 9 && ep_sel < 14) ||
1901 ep_sel == 18 || ep_sel == 20)
1902 continue;
1903
1904 /* Change settings on some selected endpoints */
1905 tmp_reg = readl(&dev->plregs->pl_ep_cfg_4);
3e76fdcb 1906 tmp_reg &= ~BIT(NON_CTRL_IN_TOLERATE_BAD_DIR);
adc82f77
RRD
1907 writel(tmp_reg, &dev->plregs->pl_ep_cfg_4);
1908 tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
3e76fdcb 1909 tmp_reg |= BIT(EP_INITIALIZED);
adc82f77
RRD
1910 writel(tmp_reg, &dev->plregs->pl_ep_ctrl);
1911 }
1912}
1913
1914static void defect7374_enable_data_eps_zero(struct net2280 *dev)
1915{
1916 u32 tmp = 0, tmp_reg;
1917 u32 fsmvalue, scratch;
1918 int i;
1919 unsigned char ep_sel;
1920
1921 scratch = get_idx_reg(dev->regs, SCRATCH);
1922 fsmvalue = scratch & (0xf << DEFECT7374_FSM_FIELD);
1923 scratch &= ~(0xf << DEFECT7374_FSM_FIELD);
1924
1925 /* See if firmware needs to set up for workaround */
1926 if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ) {
e56e69cc
RRD
1927 ep_warn(dev, "Operate Defect 7374 workaround soft this time");
1928 ep_warn(dev, "It will operate on cold-reboot and SS connect");
adc82f77
RRD
1929
1930 /* GPEPs: */
3e76fdcb 1931 tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_DIRECTION) |
adc82f77
RRD
1932 (2 << OUT_ENDPOINT_TYPE) | (2 << IN_ENDPOINT_TYPE) |
1933 ((dev->enhanced_mode) ?
3e76fdcb
RRD
1934 BIT(OUT_ENDPOINT_ENABLE) : BIT(ENDPOINT_ENABLE)) |
1935 BIT(IN_ENDPOINT_ENABLE));
adc82f77
RRD
1936
1937 for (i = 1; i < 5; i++)
1938 writel(tmp, &dev->ep[i].cfg->ep_cfg);
1939
1940 /* CSRIN, PCIIN, STATIN, RCIN */
3e76fdcb 1941 tmp = ((0 << ENDPOINT_NUMBER) | BIT(ENDPOINT_ENABLE));
adc82f77
RRD
1942 writel(tmp, &dev->dep[1].dep_cfg);
1943 writel(tmp, &dev->dep[3].dep_cfg);
1944 writel(tmp, &dev->dep[4].dep_cfg);
1945 writel(tmp, &dev->dep[5].dep_cfg);
1946
1947 /* Implemented for development and debug.
1948 * Can be refined/tuned later. */
1949 for (ep_sel = 0; ep_sel <= 21; ep_sel++) {
1950 /* Select an endpoint for subsequent operations: */
1951 tmp_reg = readl(&dev->plregs->pl_ep_ctrl);
1952 writel(((tmp_reg & ~0x1f) | ep_sel),
1953 &dev->plregs->pl_ep_ctrl);
1954
1955 if (ep_sel == 1) {
1956 tmp =
1957 (readl(&dev->plregs->pl_ep_ctrl) |
3e76fdcb 1958 BIT(CLEAR_ACK_ERROR_CODE) | 0);
adc82f77
RRD
1959 writel(tmp, &dev->plregs->pl_ep_ctrl);
1960 continue;
1961 }
1962
1963 if (ep_sel == 0 || (ep_sel > 9 && ep_sel < 14) ||
1964 ep_sel == 18 || ep_sel == 20)
1965 continue;
1966
1967 tmp = (readl(&dev->plregs->pl_ep_cfg_4) |
3e76fdcb 1968 BIT(NON_CTRL_IN_TOLERATE_BAD_DIR) | 0);
adc82f77
RRD
1969 writel(tmp, &dev->plregs->pl_ep_cfg_4);
1970
1971 tmp = readl(&dev->plregs->pl_ep_ctrl) &
3e76fdcb 1972 ~BIT(EP_INITIALIZED);
adc82f77
RRD
1973 writel(tmp, &dev->plregs->pl_ep_ctrl);
1974
1975 }
1976
1977 /* Set FSM to focus on the first Control Read:
1978 * - Tip: Connection speed is known upon the first
1979 * setup request. */
1980 scratch |= DEFECT7374_FSM_WAITING_FOR_CONTROL_READ;
1981 set_idx_reg(dev->regs, SCRATCH, scratch);
1982
1983 } else {
e56e69cc
RRD
1984 ep_warn(dev, "Defect 7374 workaround soft will NOT operate");
1985 ep_warn(dev, "It will operate on cold-reboot and SS connect");
adc82f77
RRD
1986 }
1987}
1988
1da177e4
LT
1989/* keeping it simple:
1990 * - one bus driver, initted first;
1991 * - one function driver, initted second
1992 *
1993 * most of the work to support multiple net2280 controllers would
1994 * be to associate this gadget driver (yes?) with all of them, or
1995 * perhaps to bind specific drivers to specific devices.
1996 */
1997
adc82f77 1998static void usb_reset_228x(struct net2280 *dev)
1da177e4
LT
1999{
2000 u32 tmp;
2001
2002 dev->gadget.speed = USB_SPEED_UNKNOWN;
fae3c158 2003 (void) readl(&dev->usb->usbctl);
1da177e4 2004
fae3c158 2005 net2280_led_init(dev);
1da177e4
LT
2006
2007 /* disable automatic responses, and irqs */
fae3c158
RRD
2008 writel(0, &dev->usb->stdrsp);
2009 writel(0, &dev->regs->pciirqenb0);
2010 writel(0, &dev->regs->pciirqenb1);
1da177e4
LT
2011
2012 /* clear old dma and irq state */
2013 for (tmp = 0; tmp < 4; tmp++) {
adc82f77 2014 struct net2280_ep *ep = &dev->ep[tmp + 1];
1da177e4 2015 if (ep->dma)
adc82f77 2016 abort_dma(ep);
1da177e4 2017 }
adc82f77 2018
fae3c158 2019 writel(~0, &dev->regs->irqstat0);
3e76fdcb 2020 writel(~(u32)BIT(SUSPEND_REQUEST_INTERRUPT), &dev->regs->irqstat1);
1da177e4
LT
2021
2022 /* reset, and enable pci */
3e76fdcb
RRD
2023 tmp = readl(&dev->regs->devinit) |
2024 BIT(PCI_ENABLE) |
2025 BIT(FIFO_SOFT_RESET) |
2026 BIT(USB_SOFT_RESET) |
2027 BIT(M8051_RESET);
fae3c158 2028 writel(tmp, &dev->regs->devinit);
1da177e4
LT
2029
2030 /* standard fifo and endpoint allocations */
fae3c158 2031 set_fifo_mode(dev, (fifo_mode <= 2) ? fifo_mode : 0);
1da177e4
LT
2032}
2033
adc82f77
RRD
2034static void usb_reset_338x(struct net2280 *dev)
2035{
2036 u32 tmp;
2037 u32 fsmvalue;
2038
2039 dev->gadget.speed = USB_SPEED_UNKNOWN;
2040 (void)readl(&dev->usb->usbctl);
2041
2042 net2280_led_init(dev);
2043
2044 fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
2045 (0xf << DEFECT7374_FSM_FIELD);
2046
2047 /* See if firmware needs to set up for workaround: */
2048 if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ) {
e56e69cc 2049 ep_info(dev, "%s: Defect 7374 FsmValue 0x%08x\n", __func__,
adc82f77
RRD
2050 fsmvalue);
2051 } else {
2052 /* disable automatic responses, and irqs */
2053 writel(0, &dev->usb->stdrsp);
2054 writel(0, &dev->regs->pciirqenb0);
2055 writel(0, &dev->regs->pciirqenb1);
2056 }
2057
2058 /* clear old dma and irq state */
2059 for (tmp = 0; tmp < 4; tmp++) {
2060 struct net2280_ep *ep = &dev->ep[tmp + 1];
2061
2062 if (ep->dma)
2063 abort_dma(ep);
2064 }
2065
2066 writel(~0, &dev->regs->irqstat0); writel(~0, &dev->regs->irqstat1);
2067
2068 if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ) {
2069 /* reset, and enable pci */
2070 tmp = readl(&dev->regs->devinit) |
3e76fdcb
RRD
2071 BIT(PCI_ENABLE) |
2072 BIT(FIFO_SOFT_RESET) |
2073 BIT(USB_SOFT_RESET) |
2074 BIT(M8051_RESET);
adc82f77
RRD
2075
2076 writel(tmp, &dev->regs->devinit);
2077 }
2078
2079 /* always ep-{1,2,3,4} ... maybe not ep-3 or ep-4 */
2080 INIT_LIST_HEAD(&dev->gadget.ep_list);
2081
2082 for (tmp = 1; tmp < dev->n_ep; tmp++)
2083 list_add_tail(&dev->ep[tmp].ep.ep_list, &dev->gadget.ep_list);
2084
2085}
2086
2087static void usb_reset(struct net2280 *dev)
2088{
2eeb0016 2089 if (dev->quirks & PLX_LEGACY)
adc82f77
RRD
2090 return usb_reset_228x(dev);
2091 return usb_reset_338x(dev);
2092}
2093
2094static void usb_reinit_228x(struct net2280 *dev)
1da177e4
LT
2095{
2096 u32 tmp;
2097 int init_dma;
2098
2099 /* use_dma changes are ignored till next device re-init */
2100 init_dma = use_dma;
2101
2102 /* basic endpoint init */
2103 for (tmp = 0; tmp < 7; tmp++) {
fae3c158 2104 struct net2280_ep *ep = &dev->ep[tmp];
1da177e4 2105
fae3c158 2106 ep->ep.name = ep_name[tmp];
1da177e4
LT
2107 ep->dev = dev;
2108 ep->num = tmp;
2109
2110 if (tmp > 0 && tmp <= 4) {
2111 ep->fifo_size = 1024;
2112 if (init_dma)
fae3c158 2113 ep->dma = &dev->dma[tmp - 1];
1da177e4
LT
2114 } else
2115 ep->fifo_size = 64;
fae3c158 2116 ep->regs = &dev->epregs[tmp];
adc82f77
RRD
2117 ep->cfg = &dev->epregs[tmp];
2118 ep_reset_228x(dev->regs, ep);
1da177e4 2119 }
fae3c158
RRD
2120 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 64);
2121 usb_ep_set_maxpacket_limit(&dev->ep[5].ep, 64);
2122 usb_ep_set_maxpacket_limit(&dev->ep[6].ep, 64);
1da177e4 2123
fae3c158
RRD
2124 dev->gadget.ep0 = &dev->ep[0].ep;
2125 dev->ep[0].stopped = 0;
2126 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
1da177e4
LT
2127
2128 /* we want to prevent lowlevel/insecure access from the USB host,
2129 * but erratum 0119 means this enable bit is ignored
2130 */
2131 for (tmp = 0; tmp < 5; tmp++)
fae3c158 2132 writel(EP_DONTUSE, &dev->dep[tmp].dep_cfg);
1da177e4
LT
2133}
2134
adc82f77
RRD
2135static void usb_reinit_338x(struct net2280 *dev)
2136{
2137 int init_dma;
2138 int i;
2139 u32 tmp, val;
2140 u32 fsmvalue;
2141 static const u32 ne[9] = { 0, 1, 2, 3, 4, 1, 2, 3, 4 };
2142 static const u32 ep_reg_addr[9] = { 0x00, 0xC0, 0x00, 0xC0, 0x00,
2143 0x00, 0xC0, 0x00, 0xC0 };
2144
2145 /* use_dma changes are ignored till next device re-init */
2146 init_dma = use_dma;
2147
2148 /* basic endpoint init */
2149 for (i = 0; i < dev->n_ep; i++) {
2150 struct net2280_ep *ep = &dev->ep[i];
2151
2152 ep->ep.name = ep_name[i];
2153 ep->dev = dev;
2154 ep->num = i;
2155
2156 if (i > 0 && i <= 4 && init_dma)
2157 ep->dma = &dev->dma[i - 1];
2158
2159 if (dev->enhanced_mode) {
2160 ep->cfg = &dev->epregs[ne[i]];
2161 ep->regs = (struct net2280_ep_regs __iomem *)
c43e97b2 2162 (((void __iomem *)&dev->epregs[ne[i]]) +
adc82f77
RRD
2163 ep_reg_addr[i]);
2164 ep->fiforegs = &dev->fiforegs[i];
2165 } else {
2166 ep->cfg = &dev->epregs[i];
2167 ep->regs = &dev->epregs[i];
2168 ep->fiforegs = &dev->fiforegs[i];
2169 }
2170
2171 ep->fifo_size = (i != 0) ? 2048 : 512;
2172
2173 ep_reset_338x(dev->regs, ep);
2174 }
2175 usb_ep_set_maxpacket_limit(&dev->ep[0].ep, 512);
2176
2177 dev->gadget.ep0 = &dev->ep[0].ep;
2178 dev->ep[0].stopped = 0;
2179
2180 /* Link layer set up */
2181 fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
2182 (0xf << DEFECT7374_FSM_FIELD);
2183
2184 /* See if driver needs to set up for workaround: */
2185 if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ)
e56e69cc 2186 ep_info(dev, "%s: Defect 7374 FsmValue %08x\n",
adc82f77
RRD
2187 __func__, fsmvalue);
2188 else {
2189 tmp = readl(&dev->usb_ext->usbctl2) &
3e76fdcb 2190 ~(BIT(U1_ENABLE) | BIT(U2_ENABLE) | BIT(LTM_ENABLE));
adc82f77
RRD
2191 writel(tmp, &dev->usb_ext->usbctl2);
2192 }
2193
2194 /* Hardware Defect and Workaround */
2195 val = readl(&dev->ll_lfps_regs->ll_lfps_5);
2196 val &= ~(0xf << TIMER_LFPS_6US);
2197 val |= 0x5 << TIMER_LFPS_6US;
2198 writel(val, &dev->ll_lfps_regs->ll_lfps_5);
2199
2200 val = readl(&dev->ll_lfps_regs->ll_lfps_6);
2201 val &= ~(0xffff << TIMER_LFPS_80US);
2202 val |= 0x0100 << TIMER_LFPS_80US;
2203 writel(val, &dev->ll_lfps_regs->ll_lfps_6);
2204
2205 /*
2206 * AA_AB Errata. Issue 4. Workaround for SuperSpeed USB
2207 * Hot Reset Exit Handshake may Fail in Specific Case using
2208 * Default Register Settings. Workaround for Enumeration test.
2209 */
2210 val = readl(&dev->ll_tsn_regs->ll_tsn_counters_2);
2211 val &= ~(0x1f << HOT_TX_NORESET_TS2);
2212 val |= 0x10 << HOT_TX_NORESET_TS2;
2213 writel(val, &dev->ll_tsn_regs->ll_tsn_counters_2);
2214
2215 val = readl(&dev->ll_tsn_regs->ll_tsn_counters_3);
2216 val &= ~(0x1f << HOT_RX_RESET_TS2);
2217 val |= 0x3 << HOT_RX_RESET_TS2;
2218 writel(val, &dev->ll_tsn_regs->ll_tsn_counters_3);
2219
2220 /*
2221 * Set Recovery Idle to Recover bit:
2222 * - On SS connections, setting Recovery Idle to Recover Fmw improves
2223 * link robustness with various hosts and hubs.
2224 * - It is safe to set for all connection speeds; all chip revisions.
2225 * - R-M-W to leave other bits undisturbed.
2226 * - Reference PLX TT-7372
2227 */
2228 val = readl(&dev->ll_chicken_reg->ll_tsn_chicken_bit);
3e76fdcb 2229 val |= BIT(RECOVERY_IDLE_TO_RECOVER_FMW);
adc82f77
RRD
2230 writel(val, &dev->ll_chicken_reg->ll_tsn_chicken_bit);
2231
2232 INIT_LIST_HEAD(&dev->gadget.ep0->ep_list);
2233
2234 /* disable dedicated endpoints */
2235 writel(0x0D, &dev->dep[0].dep_cfg);
2236 writel(0x0D, &dev->dep[1].dep_cfg);
2237 writel(0x0E, &dev->dep[2].dep_cfg);
2238 writel(0x0E, &dev->dep[3].dep_cfg);
2239 writel(0x0F, &dev->dep[4].dep_cfg);
2240 writel(0x0C, &dev->dep[5].dep_cfg);
2241}
2242
2243static void usb_reinit(struct net2280 *dev)
2244{
2eeb0016 2245 if (dev->quirks & PLX_LEGACY)
adc82f77
RRD
2246 return usb_reinit_228x(dev);
2247 return usb_reinit_338x(dev);
2248}
2249
2250static void ep0_start_228x(struct net2280 *dev)
1da177e4 2251{
3e76fdcb
RRD
2252 writel(BIT(CLEAR_EP_HIDE_STATUS_PHASE) |
2253 BIT(CLEAR_NAK_OUT_PACKETS) |
ae8e530a
RRD
2254 BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE),
2255 &dev->epregs[0].ep_rsp);
1da177e4
LT
2256
2257 /*
2258 * hardware optionally handles a bunch of standard requests
2259 * that the API hides from drivers anyway. have it do so.
2260 * endpoint status/features are handled in software, to
2261 * help pass tests for some dubious behavior.
2262 */
3e76fdcb
RRD
2263 writel(BIT(SET_TEST_MODE) |
2264 BIT(SET_ADDRESS) |
2265 BIT(DEVICE_SET_CLEAR_DEVICE_REMOTE_WAKEUP) |
2266 BIT(GET_DEVICE_STATUS) |
ae8e530a
RRD
2267 BIT(GET_INTERFACE_STATUS),
2268 &dev->usb->stdrsp);
3e76fdcb
RRD
2269 writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
2270 BIT(SELF_POWERED_USB_DEVICE) |
2271 BIT(REMOTE_WAKEUP_SUPPORT) |
2272 (dev->softconnect << USB_DETECT_ENABLE) |
2273 BIT(SELF_POWERED_STATUS),
2274 &dev->usb->usbctl);
1da177e4
LT
2275
2276 /* enable irqs so we can see ep0 and general operation */
3e76fdcb
RRD
2277 writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
2278 BIT(ENDPOINT_0_INTERRUPT_ENABLE),
2279 &dev->regs->pciirqenb0);
2280 writel(BIT(PCI_INTERRUPT_ENABLE) |
2281 BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT_ENABLE) |
2282 BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT_ENABLE) |
2283 BIT(PCI_RETRY_ABORT_INTERRUPT_ENABLE) |
2284 BIT(VBUS_INTERRUPT_ENABLE) |
2285 BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
2286 BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE),
2287 &dev->regs->pciirqenb1);
1da177e4
LT
2288
2289 /* don't leave any writes posted */
fae3c158 2290 (void) readl(&dev->usb->usbctl);
1da177e4
LT
2291}
2292
adc82f77
RRD
2293static void ep0_start_338x(struct net2280 *dev)
2294{
2295 u32 fsmvalue;
2296
2297 fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
2298 (0xf << DEFECT7374_FSM_FIELD);
2299
2300 if (fsmvalue != DEFECT7374_FSM_SS_CONTROL_READ)
e56e69cc 2301 ep_info(dev, "%s: Defect 7374 FsmValue %08x\n", __func__,
adc82f77
RRD
2302 fsmvalue);
2303 else
3e76fdcb
RRD
2304 writel(BIT(CLEAR_NAK_OUT_PACKETS_MODE) |
2305 BIT(SET_EP_HIDE_STATUS_PHASE),
adc82f77
RRD
2306 &dev->epregs[0].ep_rsp);
2307
2308 /*
2309 * hardware optionally handles a bunch of standard requests
2310 * that the API hides from drivers anyway. have it do so.
2311 * endpoint status/features are handled in software, to
2312 * help pass tests for some dubious behavior.
2313 */
3e76fdcb
RRD
2314 writel(BIT(SET_ISOCHRONOUS_DELAY) |
2315 BIT(SET_SEL) |
2316 BIT(SET_TEST_MODE) |
2317 BIT(SET_ADDRESS) |
2318 BIT(GET_INTERFACE_STATUS) |
2319 BIT(GET_DEVICE_STATUS),
adc82f77
RRD
2320 &dev->usb->stdrsp);
2321 dev->wakeup_enable = 1;
3e76fdcb 2322 writel(BIT(USB_ROOT_PORT_WAKEUP_ENABLE) |
adc82f77 2323 (dev->softconnect << USB_DETECT_ENABLE) |
3e76fdcb 2324 BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
adc82f77
RRD
2325 &dev->usb->usbctl);
2326
2327 /* enable irqs so we can see ep0 and general operation */
3e76fdcb 2328 writel(BIT(SETUP_PACKET_INTERRUPT_ENABLE) |
ae8e530a
RRD
2329 BIT(ENDPOINT_0_INTERRUPT_ENABLE),
2330 &dev->regs->pciirqenb0);
3e76fdcb
RRD
2331 writel(BIT(PCI_INTERRUPT_ENABLE) |
2332 BIT(ROOT_PORT_RESET_INTERRUPT_ENABLE) |
2333 BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT_ENABLE) |
2334 BIT(VBUS_INTERRUPT_ENABLE),
adc82f77
RRD
2335 &dev->regs->pciirqenb1);
2336
2337 /* don't leave any writes posted */
2338 (void)readl(&dev->usb->usbctl);
2339}
2340
2341static void ep0_start(struct net2280 *dev)
2342{
2eeb0016 2343 if (dev->quirks & PLX_LEGACY)
adc82f77
RRD
2344 return ep0_start_228x(dev);
2345 return ep0_start_338x(dev);
2346}
2347
1da177e4
LT
2348/* when a driver is successfully registered, it will receive
2349 * control requests including set_configuration(), which enables
2350 * non-control requests. then usb traffic follows until a
2351 * disconnect is reported. then a host may connect again, or
2352 * the driver might get unbound.
2353 */
4cf5e00b
FB
2354static int net2280_start(struct usb_gadget *_gadget,
2355 struct usb_gadget_driver *driver)
1da177e4 2356{
4cf5e00b 2357 struct net2280 *dev;
1da177e4
LT
2358 int retval;
2359 unsigned i;
2360
2361 /* insist on high speed support from the driver, since
2362 * (dev->usb->xcvrdiag & FORCE_FULL_SPEED_MODE)
2363 * "must not be used in normal operation"
2364 */
ae8e530a
RRD
2365 if (!driver || driver->max_speed < USB_SPEED_HIGH ||
2366 !driver->setup)
1da177e4 2367 return -EINVAL;
4cf5e00b 2368
fae3c158 2369 dev = container_of(_gadget, struct net2280, gadget);
1da177e4 2370
adc82f77 2371 for (i = 0; i < dev->n_ep; i++)
fae3c158 2372 dev->ep[i].irqs = 0;
1da177e4
LT
2373
2374 /* hook up the driver ... */
2375 dev->softconnect = 1;
2376 driver->driver.bus = NULL;
2377 dev->driver = driver;
1da177e4 2378
fae3c158
RRD
2379 retval = device_create_file(&dev->pdev->dev, &dev_attr_function);
2380 if (retval)
2381 goto err_unbind;
2382 retval = device_create_file(&dev->pdev->dev, &dev_attr_queues);
2383 if (retval)
2384 goto err_func;
1da177e4 2385
2f076077 2386 /* Enable force-full-speed testing mode, if desired */
2eeb0016 2387 if (full_speed && (dev->quirks & PLX_LEGACY))
3e76fdcb 2388 writel(BIT(FORCE_FULL_SPEED_MODE), &dev->usb->xcvrdiag);
2f076077 2389
1da177e4
LT
2390 /* ... then enable host detection and ep0; and we're ready
2391 * for set_configuration as well as eventual disconnect.
2392 */
fae3c158 2393 net2280_led_active(dev, 1);
adc82f77 2394
2eeb0016 2395 if (dev->quirks & PLX_SUPERSPEED)
adc82f77
RRD
2396 defect7374_enable_data_eps_zero(dev);
2397
fae3c158 2398 ep0_start(dev);
1da177e4 2399
e56e69cc 2400 ep_dbg(dev, "%s ready, usbctl %08x stdrsp %08x\n",
1da177e4 2401 driver->driver.name,
fae3c158
RRD
2402 readl(&dev->usb->usbctl),
2403 readl(&dev->usb->stdrsp));
1da177e4
LT
2404
2405 /* pci writes may still be posted */
2406 return 0;
b3899dac
JG
2407
2408err_func:
fae3c158 2409 device_remove_file(&dev->pdev->dev, &dev_attr_function);
b3899dac 2410err_unbind:
b3899dac
JG
2411 dev->driver = NULL;
2412 return retval;
1da177e4 2413}
1da177e4 2414
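/*
 * A minimal sketch (illustrative only, not part of this driver) of the
 * sort of gadget driver net2280_start() accepts: it must advertise at
 * least high-speed support and provide a setup() callback, otherwise the
 * checks above reject it with -EINVAL.
 */
#if 0
static int example_setup(struct usb_gadget *g,
		const struct usb_ctrlrequest *ctrl)
{
	/* a real driver decodes ctrl and queues requests on g->ep0 */
	return -EOPNOTSUPP;
}

static struct usb_gadget_driver example_gadget_driver = {
	.max_speed	= USB_SPEED_HIGH,
	.setup		= example_setup,
	.driver		= {
		.name	= "example_gadget",	/* hypothetical name */
	},
};
#endif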
fae3c158 2415static void stop_activity(struct net2280 *dev, struct usb_gadget_driver *driver)
1da177e4
LT
2416{
2417 int i;
2418
2419 /* don't disconnect if it's not connected */
2420 if (dev->gadget.speed == USB_SPEED_UNKNOWN)
2421 driver = NULL;
2422
2423 /* stop hardware; prevent new request submissions;
2424 * and kill any outstanding requests.
2425 */
fae3c158 2426 usb_reset(dev);
adc82f77 2427 for (i = 0; i < dev->n_ep; i++)
fae3c158 2428 nuke(&dev->ep[i]);
1da177e4 2429
699412d9
FB
2430 /* report disconnect; the driver is already quiesced */
2431 if (driver) {
2432 spin_unlock(&dev->lock);
2433 driver->disconnect(&dev->gadget);
2434 spin_lock(&dev->lock);
2435 }
2436
fae3c158 2437 usb_reinit(dev);
1da177e4
LT
2438}
2439
4cf5e00b
FB
2440static int net2280_stop(struct usb_gadget *_gadget,
2441 struct usb_gadget_driver *driver)
1da177e4 2442{
4cf5e00b 2443 struct net2280 *dev;
1da177e4
LT
2444 unsigned long flags;
2445
fae3c158 2446 dev = container_of(_gadget, struct net2280, gadget);
1da177e4 2447
fae3c158
RRD
2448 spin_lock_irqsave(&dev->lock, flags);
2449 stop_activity(dev, driver);
2450 spin_unlock_irqrestore(&dev->lock, flags);
1da177e4 2451
1da177e4
LT
2452 dev->driver = NULL;
2453
fae3c158 2454 net2280_led_active(dev, 0);
2f076077
AS
2455
2456 /* Disable full-speed test mode */
2eeb0016 2457 if (dev->quirks & PLX_LEGACY)
adc82f77 2458 writel(0, &dev->usb->xcvrdiag);
2f076077 2459
fae3c158
RRD
2460 device_remove_file(&dev->pdev->dev, &dev_attr_function);
2461 device_remove_file(&dev->pdev->dev, &dev_attr_queues);
1da177e4 2462
e56e69cc 2463 ep_dbg(dev, "unregistered driver '%s'\n",
84237bfb
RRD
2464 driver ? driver->driver.name : "");
2465
1da177e4
LT
2466 return 0;
2467}
1da177e4
LT
2468
2469/*-------------------------------------------------------------------------*/
2470
2471/* handle ep0, ep-e, ep-f with 64 byte packets: packet per irq.
2472 * also works for dma-capable endpoints, in pio mode or just
2473 * to manually advance the queue after short OUT transfers.
2474 */
fae3c158 2475static void handle_ep_small(struct net2280_ep *ep)
1da177e4
LT
2476{
2477 struct net2280_request *req;
2478 u32 t;
2479 /* 0 error, 1 mid-data, 2 done */
2480 int mode = 1;
2481
fae3c158
RRD
2482 if (!list_empty(&ep->queue))
2483 req = list_entry(ep->queue.next,
1da177e4
LT
2484 struct net2280_request, queue);
2485 else
2486 req = NULL;
2487
2488 /* ack all, and handle what we care about */
fae3c158 2489 t = readl(&ep->regs->ep_stat);
1da177e4
LT
2490 ep->irqs++;
2491#if 0
e56e69cc 2492 ep_vdbg(ep->dev, "%s ack ep_stat %08x, req %p\n",
1da177e4
LT
2493 ep->ep.name, t, req ? &req->req : 0);
2494#endif
2eeb0016 2495 if (!ep->is_in || (ep->dev->quirks & PLX_2280))
3e76fdcb 2496 writel(t & ~BIT(NAK_OUT_PACKETS), &ep->regs->ep_stat);
950ee4c8
GL
2497 else
2498 /* Added for 2282 */
fae3c158 2499 writel(t, &ep->regs->ep_stat);
1da177e4
LT
2500
2501 /* for ep0, monitor token irqs to catch data stage length errors
2502 * and to synchronize on status.
2503 *
2504 * also, to defer reporting of protocol stalls ... here's where
2505 * data or status first appears, handling stalls here should never
2506 * cause trouble on the host side..
2507 *
2508 * control requests could be slightly faster without token synch for
2509 * status, but status can jam up that way.
2510 */
fae3c158 2511 if (unlikely(ep->num == 0)) {
1da177e4
LT
2512 if (ep->is_in) {
2513 /* status; stop NAKing */
3e76fdcb 2514 if (t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) {
1da177e4
LT
2515 if (ep->dev->protocol_stall) {
2516 ep->stopped = 1;
fae3c158 2517 set_halt(ep);
1da177e4
LT
2518 }
2519 if (!req)
fae3c158 2520 allow_status(ep);
1da177e4
LT
2521 mode = 2;
2522 /* reply to extra IN data tokens with a zlp */
3e76fdcb 2523 } else if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
1da177e4
LT
2524 if (ep->dev->protocol_stall) {
2525 ep->stopped = 1;
fae3c158 2526 set_halt(ep);
1da177e4 2527 mode = 2;
1f26e28d
AS
2528 } else if (ep->responded &&
2529 !req && !ep->stopped)
fae3c158 2530 write_fifo(ep, NULL);
1da177e4
LT
2531 }
2532 } else {
2533 /* status; stop NAKing */
3e76fdcb 2534 if (t & BIT(DATA_IN_TOKEN_INTERRUPT)) {
1da177e4
LT
2535 if (ep->dev->protocol_stall) {
2536 ep->stopped = 1;
fae3c158 2537 set_halt(ep);
1da177e4
LT
2538 }
2539 mode = 2;
2540 /* an extra OUT token is an error */
ae8e530a
RRD
2541 } else if (((t & BIT(DATA_OUT_PING_TOKEN_INTERRUPT)) &&
2542 req &&
2543 req->req.actual == req->req.length) ||
2544 (ep->responded && !req)) {
1da177e4 2545 ep->dev->protocol_stall = 1;
fae3c158 2546 set_halt(ep);
1da177e4
LT
2547 ep->stopped = 1;
2548 if (req)
fae3c158 2549 done(ep, req, -EOVERFLOW);
1da177e4
LT
2550 req = NULL;
2551 }
2552 }
2553 }
2554
fae3c158 2555 if (unlikely(!req))
1da177e4
LT
2556 return;
2557
2558 /* manual DMA queue advance after short OUT */
fae3c158 2559 if (likely(ep->dma)) {
3e76fdcb 2560 if (t & BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT)) {
1da177e4
LT
2561 u32 count;
2562 int stopped = ep->stopped;
2563
2564 /* TRANSFERRED works around OUT_DONE erratum 0112.
2565 * we expect (N <= maxpacket) bytes; host wrote M.
2566 * iff (M < N) we won't ever see a DMA interrupt.
2567 */
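/*
 * Worked example (hypothetical numbers): with maxpacket N = 512 and the
 * host writing only M = 100 bytes, no DMA completion interrupt will
 * arrive, so the loop below polls ep_stat until FIFO_EMPTY and then
 * takes the residue from dmacount before aborting the channel.
 */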
2568 ep->stopped = 1;
fae3c158 2569 for (count = 0; ; t = readl(&ep->regs->ep_stat)) {
1da177e4
LT
2570
2571 /* any preceding dma transfers must finish.
2572 * dma handles (M >= N), may empty the queue
2573 */
fae3c158 2574 scan_dma_completions(ep);
ae8e530a
RRD
2575 if (unlikely(list_empty(&ep->queue) ||
2576 ep->out_overflow)) {
1da177e4
LT
2577 req = NULL;
2578 break;
2579 }
fae3c158 2580 req = list_entry(ep->queue.next,
1da177e4
LT
2581 struct net2280_request, queue);
2582
2583 /* here either (M < N), a "real" short rx;
2584 * or (M == N) and the queue didn't empty
2585 */
3e76fdcb 2586 if (likely(t & BIT(FIFO_EMPTY))) {
fae3c158 2587 count = readl(&ep->dma->dmacount);
1da177e4 2588 count &= DMA_BYTE_COUNT_MASK;
fae3c158 2589 if (readl(&ep->dma->dmadesc)
1da177e4
LT
2590 != req->td_dma)
2591 req = NULL;
2592 break;
2593 }
2594 udelay(1);
2595 }
2596
2597 /* stop DMA, leave ep NAKing */
3e76fdcb 2598 writel(BIT(DMA_ABORT), &ep->dma->dmastat);
fae3c158 2599 spin_stop_dma(ep->dma);
1da177e4 2600
fae3c158 2601 if (likely(req)) {
1da177e4 2602 req->td->dmacount = 0;
fae3c158
RRD
2603 t = readl(&ep->regs->ep_avail);
2604 dma_done(ep, req, count,
901b3d75
DB
2605 (ep->out_overflow || t)
2606 ? -EOVERFLOW : 0);
1da177e4
LT
2607 }
2608
2609 /* also flush to prevent erratum 0106 trouble */
ae8e530a
RRD
2610 if (unlikely(ep->out_overflow ||
2611 (ep->dev->chiprev == 0x0100 &&
2612 ep->dev->gadget.speed
2613 == USB_SPEED_FULL))) {
fae3c158 2614 out_flush(ep);
1da177e4
LT
2615 ep->out_overflow = 0;
2616 }
2617
2618 /* (re)start dma if needed, stop NAKing */
2619 ep->stopped = stopped;
fae3c158
RRD
2620 if (!list_empty(&ep->queue))
2621 restart_dma(ep);
1da177e4 2622 } else
e56e69cc 2623 ep_dbg(ep->dev, "%s dma ep_stat %08x ??\n",
1da177e4
LT
2624 ep->ep.name, t);
2625 return;
2626
2627 /* data packet(s) received (in the fifo, OUT) */
3e76fdcb 2628 } else if (t & BIT(DATA_PACKET_RECEIVED_INTERRUPT)) {
fae3c158 2629 if (read_fifo(ep, req) && ep->num != 0)
1da177e4
LT
2630 mode = 2;
2631
2632 /* data packet(s) transmitted (IN) */
3e76fdcb 2633 } else if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT)) {
1da177e4
LT
2634 unsigned len;
2635
2636 len = req->req.length - req->req.actual;
2637 if (len > ep->ep.maxpacket)
2638 len = ep->ep.maxpacket;
2639 req->req.actual += len;
2640
2641 /* if we wrote it all, we're usually done */
fae3c158
RRD
2642 /* send zlps until the status stage */
2643 if ((req->req.actual == req->req.length) &&
2644 (!req->req.zero || len != ep->ep.maxpacket) && ep->num)
1da177e4 2645 mode = 2;
1da177e4
LT
2646
2647 /* there was nothing to do ... */
2648 } else if (mode == 1)
2649 return;
2650
2651 /* done */
2652 if (mode == 2) {
2653 /* stream endpoints often resubmit/unlink in completion */
fae3c158 2654 done(ep, req, 0);
1da177e4
LT
2655
2656 /* maybe advance queue to next request */
2657 if (ep->num == 0) {
2658 /* NOTE: net2280 could let gadget driver start the
2659 * status stage later. since not all controllers let
2660 * them control that, the api doesn't (yet) allow it.
2661 */
2662 if (!ep->stopped)
fae3c158 2663 allow_status(ep);
1da177e4
LT
2664 req = NULL;
2665 } else {
fae3c158
RRD
2666 if (!list_empty(&ep->queue) && !ep->stopped)
2667 req = list_entry(ep->queue.next,
1da177e4
LT
2668 struct net2280_request, queue);
2669 else
2670 req = NULL;
2671 if (req && !ep->is_in)
fae3c158 2672 stop_out_naking(ep);
1da177e4
LT
2673 }
2674 }
2675
2676 /* is there a buffer for the next packet?
2677 * for best streaming performance, make sure there is one.
2678 */
2679 if (req && !ep->stopped) {
2680
2681 /* load IN fifo with next packet (may be zlp) */
3e76fdcb 2682 if (t & BIT(DATA_PACKET_TRANSMITTED_INTERRUPT))
fae3c158 2683 write_fifo(ep, &req->req);
1da177e4
LT
2684 }
2685}
2686
fae3c158 2687static struct net2280_ep *get_ep_by_addr(struct net2280 *dev, u16 wIndex)
1da177e4
LT
2688{
2689 struct net2280_ep *ep;
2690
2691 if ((wIndex & USB_ENDPOINT_NUMBER_MASK) == 0)
fae3c158
RRD
2692 return &dev->ep[0];
2693 list_for_each_entry(ep, &dev->gadget.ep_list, ep.ep_list) {
1da177e4
LT
2694 u8 bEndpointAddress;
2695
2696 if (!ep->desc)
2697 continue;
2698 bEndpointAddress = ep->desc->bEndpointAddress;
2699 if ((wIndex ^ bEndpointAddress) & USB_DIR_IN)
2700 continue;
2701 if ((wIndex & 0x0f) == (bEndpointAddress & 0x0f))
2702 return ep;
2703 }
2704 return NULL;
2705}
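/*
 * Illustrative lookups (endpoint addresses are hypothetical and depend on
 * the active configuration): wIndex 0x00 or 0x80 returns ep0; wIndex 0x81
 * matches whichever enabled endpoint was configured with bEndpointAddress
 * 0x81 (IN endpoint number 1); an address with no enabled match returns
 * NULL, and the control-request handlers below respond by stalling ep0.
 */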
2706
adc82f77
RRD
2707static void defect7374_workaround(struct net2280 *dev, struct usb_ctrlrequest r)
2708{
2709 u32 scratch, fsmvalue;
2710 u32 ack_wait_timeout, state;
2711
2712 /* Workaround for Defect 7374 (U1/U2 erroneously rejected): */
2713 scratch = get_idx_reg(dev->regs, SCRATCH);
2714 fsmvalue = scratch & (0xf << DEFECT7374_FSM_FIELD);
2715 scratch &= ~(0xf << DEFECT7374_FSM_FIELD);
2716
2717 if (!((fsmvalue == DEFECT7374_FSM_WAITING_FOR_CONTROL_READ) &&
2718 (r.bRequestType & USB_DIR_IN)))
2719 return;
2720
2721 /* This is the first Control Read for this connection: */
3e76fdcb 2722 if (!(readl(&dev->usb->usbstat) & BIT(SUPER_SPEED_MODE))) {
adc82f77
RRD
2723 /*
2724 * Connection is NOT SS:
2725 * - Connection must be FS or HS.
2726 * - This FSM state should allow workaround software to
2727 * run after the next USB connection.
2728 */
2729 scratch |= DEFECT7374_FSM_NON_SS_CONTROL_READ;
2730 goto restore_data_eps;
2731 }
2732
2733 /* Connection is SS: */
2734 for (ack_wait_timeout = 0;
2735 ack_wait_timeout < DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS;
2736 ack_wait_timeout++) {
2737
2738 state = readl(&dev->plregs->pl_ep_status_1)
2739 & (0xff << STATE);
2740 if ((state >= (ACK_GOOD_NORMAL << STATE)) &&
2741 (state <= (ACK_GOOD_MORE_ACKS_TO_COME << STATE))) {
2742 scratch |= DEFECT7374_FSM_SS_CONTROL_READ;
2743 break;
2744 }
2745
2746 /*
2747 * We have not yet received host's Data Phase ACK
2748 * - Wait and try again.
2749 */
2750 udelay(DEFECT_7374_PROCESSOR_WAIT_TIME);
2751
2752 continue;
2753 }
2754
2755
2756 if (ack_wait_timeout >= DEFECT_7374_NUMBEROF_MAX_WAIT_LOOPS) {
e56e69cc 2757 ep_err(dev, "FAIL: Defect 7374 workaround waited but failed "
adc82f77 2758 "to detect SS host's data phase ACK.");
e56e69cc 2759 ep_err(dev, "PL_EP_STATUS_1(23:16): expected from 0x11 to 0x16, "
adc82f77
RRD
2760 "got 0x%2.2x.\n", state >> STATE);
2761 } else {
e56e69cc 2762 ep_warn(dev, "INFO: Defect 7374 workaround waited about "
adc82f77
RRD
2763 "%d uSec for Control Read Data Phase ACK\n",
2764 DEFECT_7374_PROCESSOR_WAIT_TIME * ack_wait_timeout);
2765 }
2766
2767restore_data_eps:
2768 /*
2769 * Restore data EPs to their pre-workaround settings (disabled,
2770 * initialized, and other details).
2771 */
2772 defect7374_disable_data_eps(dev);
2773
2774 set_idx_reg(dev->regs, SCRATCH, scratch);
2775
2776 return;
2777}
2778
2779static void ep_stall(struct net2280_ep *ep, int stall)
2780{
2781 struct net2280 *dev = ep->dev;
2782 u32 val;
2783 static const u32 ep_pl[9] = { 0, 3, 4, 7, 8, 2, 5, 6, 9 };
2784
2785 if (stall) {
3e76fdcb
RRD
2786 writel(BIT(SET_ENDPOINT_HALT) |
2787 /* BIT(SET_NAK_PACKETS) | */
2788 BIT(CLEAR_CONTROL_STATUS_PHASE_HANDSHAKE),
adc82f77
RRD
2789 &ep->regs->ep_rsp);
2790 ep->is_halt = 1;
2791 } else {
2792 if (dev->gadget.speed == USB_SPEED_SUPER) {
2793 /*
2794 * Workaround for SS SeqNum not cleared via
2795 * Endpoint Halt (Clear) bit. select endpoint
2796 */
2797 val = readl(&dev->plregs->pl_ep_ctrl);
2798 val = (val & ~0x1f) | ep_pl[ep->num];
2799 writel(val, &dev->plregs->pl_ep_ctrl);
2800
3e76fdcb 2801 val |= BIT(SEQUENCE_NUMBER_RESET);
adc82f77
RRD
2802 writel(val, &dev->plregs->pl_ep_ctrl);
2803 }
2804 val = readl(&ep->regs->ep_rsp);
3e76fdcb
RRD
2805 val |= BIT(CLEAR_ENDPOINT_HALT) |
2806 BIT(CLEAR_ENDPOINT_TOGGLE);
ae8e530a
RRD
2807 writel(val,
2808 /* | BIT(CLEAR_NAK_PACKETS),*/
2809 &ep->regs->ep_rsp);
adc82f77
RRD
2810 ep->is_halt = 0;
2811 val = readl(&ep->regs->ep_rsp);
2812 }
2813}
2814
2815static void ep_stdrsp(struct net2280_ep *ep, int value, int wedged)
2816{
2817 /* set/clear, then synch memory views with the device */
2818 if (value) {
2819 ep->stopped = 1;
2820 if (ep->num == 0)
2821 ep->dev->protocol_stall = 1;
2822 else {
2823 if (ep->dma)
2824 ep_stop_dma(ep);
2825 ep_stall(ep, true);
2826 }
2827
2828 if (wedged)
2829 ep->wedged = 1;
2830 } else {
2831 ep->stopped = 0;
2832 ep->wedged = 0;
2833
2834 ep_stall(ep, false);
2835
2836 /* Flush the queue */
2837 if (!list_empty(&ep->queue)) {
2838 struct net2280_request *req =
2839 list_entry(ep->queue.next, struct net2280_request,
2840 queue);
2841 if (ep->dma)
2842 resume_dma(ep);
2843 else {
2844 if (ep->is_in)
2845 write_fifo(ep, &req->req);
2846 else {
2847 if (read_fifo(ep, req))
2848 done(ep, req, 0);
2849 }
2850 }
2851 }
2852 }
2853}
2854
2855static void handle_stat0_irqs_superspeed(struct net2280 *dev,
2856 struct net2280_ep *ep, struct usb_ctrlrequest r)
2857{
2858 int tmp = 0;
2859
2860#define w_value le16_to_cpu(r.wValue)
2861#define w_index le16_to_cpu(r.wIndex)
2862#define w_length le16_to_cpu(r.wLength)
2863
2864 switch (r.bRequest) {
2865 struct net2280_ep *e;
2866 u16 status;
2867
2868 case USB_REQ_SET_CONFIGURATION:
2869 dev->addressed_state = !w_value;
2870 goto usb3_delegate;
2871
2872 case USB_REQ_GET_STATUS:
2873 switch (r.bRequestType) {
2874 case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
2875 status = dev->wakeup_enable ? 0x02 : 0x00;
2876 if (dev->selfpowered)
3e76fdcb 2877 status |= BIT(0);
adc82f77
RRD
2878 status |= (dev->u1_enable << 2 | dev->u2_enable << 3 |
2879 dev->ltm_enable << 4);
2880 writel(0, &dev->epregs[0].ep_irqenb);
2881 set_fifo_bytecount(ep, sizeof(status));
2882 writel((__force u32) status, &dev->epregs[0].ep_data);
2883 allow_status_338x(ep);
2884 break;
2885
2886 case (USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
2887 e = get_ep_by_addr(dev, w_index);
2888 if (!e)
2889 goto do_stall3;
2890 status = readl(&e->regs->ep_rsp) &
3e76fdcb 2891 BIT(CLEAR_ENDPOINT_HALT);
adc82f77
RRD
2892 writel(0, &dev->epregs[0].ep_irqenb);
2893 set_fifo_bytecount(ep, sizeof(status));
2894 writel((__force u32) status, &dev->epregs[0].ep_data);
2895 allow_status_338x(ep);
2896 break;
2897
2898 default:
2899 goto usb3_delegate;
2900 }
2901 break;
2902
2903 case USB_REQ_CLEAR_FEATURE:
2904 switch (r.bRequestType) {
2905 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
2906 if (!dev->addressed_state) {
2907 switch (w_value) {
2908 case USB_DEVICE_U1_ENABLE:
2909 dev->u1_enable = 0;
2910 writel(readl(&dev->usb_ext->usbctl2) &
3e76fdcb 2911 ~BIT(U1_ENABLE),
adc82f77
RRD
2912 &dev->usb_ext->usbctl2);
2913 allow_status_338x(ep);
2914 goto next_endpoints3;
2915
2916 case USB_DEVICE_U2_ENABLE:
2917 dev->u2_enable = 0;
2918 writel(readl(&dev->usb_ext->usbctl2) &
3e76fdcb 2919 ~BIT(U2_ENABLE),
adc82f77
RRD
2920 &dev->usb_ext->usbctl2);
2921 allow_status_338x(ep);
2922 goto next_endpoints3;
2923
2924 case USB_DEVICE_LTM_ENABLE:
2925 dev->ltm_enable = 0;
2926 writel(readl(&dev->usb_ext->usbctl2) &
3e76fdcb 2927 ~BIT(LTM_ENABLE),
adc82f77
RRD
2928 &dev->usb_ext->usbctl2);
2929 allow_status_338x(ep);
2930 goto next_endpoints3;
2931
2932 default:
2933 break;
2934 }
2935 }
2936 if (w_value == USB_DEVICE_REMOTE_WAKEUP) {
2937 dev->wakeup_enable = 0;
2938 writel(readl(&dev->usb->usbctl) &
3e76fdcb 2939 ~BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
adc82f77
RRD
2940 &dev->usb->usbctl);
2941 allow_status_338x(ep);
2942 break;
2943 }
2944 goto usb3_delegate;
2945
2946 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
2947 e = get_ep_by_addr(dev, w_index);
2948 if (!e)
2949 goto do_stall3;
2950 if (w_value != USB_ENDPOINT_HALT)
2951 goto do_stall3;
e56e69cc 2952 ep_vdbg(dev, "%s clear halt\n", e->ep.name);
adc82f77
RRD
2953 ep_stall(e, false);
2954 if (!list_empty(&e->queue) && e->td_dma)
2955 restart_dma(e);
2956 allow_status(ep);
2957 ep->stopped = 1;
2958 break;
2959
2960 default:
2961 goto usb3_delegate;
2962 }
2963 break;
2964 case USB_REQ_SET_FEATURE:
2965 switch (r.bRequestType) {
2966 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE):
2967 if (!dev->addressed_state) {
2968 switch (w_value) {
2969 case USB_DEVICE_U1_ENABLE:
2970 dev->u1_enable = 1;
2971 writel(readl(&dev->usb_ext->usbctl2) |
3e76fdcb 2972 BIT(U1_ENABLE),
adc82f77
RRD
2973 &dev->usb_ext->usbctl2);
2974 allow_status_338x(ep);
2975 goto next_endpoints3;
2976
2977 case USB_DEVICE_U2_ENABLE:
2978 dev->u2_enable = 1;
2979 writel(readl(&dev->usb_ext->usbctl2) |
3e76fdcb 2980 BIT(U2_ENABLE),
adc82f77
RRD
2981 &dev->usb_ext->usbctl2);
2982 allow_status_338x(ep);
2983 goto next_endpoints3;
2984
2985 case USB_DEVICE_LTM_ENABLE:
2986 dev->ltm_enable = 1;
2987 writel(readl(&dev->usb_ext->usbctl2) |
3e76fdcb 2988 BIT(LTM_ENABLE),
adc82f77
RRD
2989 &dev->usb_ext->usbctl2);
2990 allow_status_338x(ep);
2991 goto next_endpoints3;
2992 default:
2993 break;
2994 }
2995 }
2996
2997 if (w_value == USB_DEVICE_REMOTE_WAKEUP) {
2998 dev->wakeup_enable = 1;
2999 writel(readl(&dev->usb->usbctl) |
3e76fdcb 3000 BIT(DEVICE_REMOTE_WAKEUP_ENABLE),
adc82f77
RRD
3001 &dev->usb->usbctl);
3002 allow_status_338x(ep);
3003 break;
3004 }
3005 goto usb3_delegate;
3006
3007 case (USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_ENDPOINT):
3008 e = get_ep_by_addr(dev, w_index);
3009 if (!e || (w_value != USB_ENDPOINT_HALT))
3010 goto do_stall3;
3011 ep_stdrsp(e, true, false);
3012 allow_status_338x(ep);
3013 break;
3014
3015 default:
3016 goto usb3_delegate;
3017 }
3018
3019 break;
3020 default:
3021
3022usb3_delegate:
e56e69cc 3023 ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x ep_cfg %08x\n",
adc82f77
RRD
3024 r.bRequestType, r.bRequest,
3025 w_value, w_index, w_length,
3026 readl(&ep->cfg->ep_cfg));
3027
3028 ep->responded = 0;
3029 spin_unlock(&dev->lock);
3030 tmp = dev->driver->setup(&dev->gadget, &r);
3031 spin_lock(&dev->lock);
3032 }
3033do_stall3:
3034 if (tmp < 0) {
e56e69cc 3035 ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n",
adc82f77
RRD
3036 r.bRequestType, r.bRequest, tmp);
3037 dev->protocol_stall = 1;
3038 /* TD 9.9 Halt Endpoint test. TD 9.22 Set feature test */
3039 ep_stall(ep, true);
3040 }
3041
3042next_endpoints3:
3043
3044#undef w_value
3045#undef w_index
3046#undef w_length
3047
3048 return;
3049}
3050
fae3c158 3051static void handle_stat0_irqs(struct net2280 *dev, u32 stat)
1da177e4
LT
3052{
3053 struct net2280_ep *ep;
3054 u32 num, scratch;
3055
3056 /* most of these don't need individual acks */
3e76fdcb 3057 stat &= ~BIT(INTA_ASSERTED);
1da177e4
LT
3058 if (!stat)
3059 return;
e56e69cc 3060 /* ep_dbg(dev, "irqstat0 %04x\n", stat); */
1da177e4
LT
3061
3062 /* starting a control request? */
3e76fdcb 3063 if (unlikely(stat & BIT(SETUP_PACKET_INTERRUPT))) {
1da177e4 3064 union {
fae3c158 3065 u32 raw[2];
1da177e4
LT
3066 struct usb_ctrlrequest r;
3067 } u;
950ee4c8 3068 int tmp;
1da177e4
LT
3069 struct net2280_request *req;
3070
3071 if (dev->gadget.speed == USB_SPEED_UNKNOWN) {
adc82f77 3072 u32 val = readl(&dev->usb->usbstat);
3e76fdcb 3073 if (val & BIT(SUPER_SPEED)) {
adc82f77
RRD
3074 dev->gadget.speed = USB_SPEED_SUPER;
3075 usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
3076 EP0_SS_MAX_PACKET_SIZE);
3e76fdcb 3077 } else if (val & BIT(HIGH_SPEED)) {
1da177e4 3078 dev->gadget.speed = USB_SPEED_HIGH;
adc82f77
RRD
3079 usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
3080 EP0_HS_MAX_PACKET_SIZE);
3081 } else {
1da177e4 3082 dev->gadget.speed = USB_SPEED_FULL;
adc82f77
RRD
3083 usb_ep_set_maxpacket_limit(&dev->ep[0].ep,
3084 EP0_HS_MAX_PACKET_SIZE);
3085 }
fae3c158 3086 net2280_led_speed(dev, dev->gadget.speed);
e56e69cc 3087 ep_dbg(dev, "%s\n",
fae3c158 3088 usb_speed_string(dev->gadget.speed));
1da177e4
LT
3089 }
3090
fae3c158 3091 ep = &dev->ep[0];
1da177e4
LT
3092 ep->irqs++;
3093
3094 /* make sure any leftover request state is cleared */
3e76fdcb 3095 stat &= ~BIT(ENDPOINT_0_INTERRUPT);
fae3c158
RRD
3096 while (!list_empty(&ep->queue)) {
3097 req = list_entry(ep->queue.next,
1da177e4 3098 struct net2280_request, queue);
fae3c158 3099 done(ep, req, (req->req.actual == req->req.length)
1da177e4
LT
3100 ? 0 : -EPROTO);
3101 }
3102 ep->stopped = 0;
3103 dev->protocol_stall = 0;
2eeb0016 3104 if (dev->quirks & PLX_SUPERSPEED)
adc82f77
RRD
3105 ep->is_halt = 0;
3106 else {
2eeb0016 3107 if (ep->dev->quirks & PLX_2280)
3e76fdcb
RRD
3108 tmp = BIT(FIFO_OVERFLOW) |
3109 BIT(FIFO_UNDERFLOW);
adc82f77
RRD
3110 else
3111 tmp = 0;
3112
3e76fdcb
RRD
3113 writel(tmp | BIT(TIMEOUT) |
3114 BIT(USB_STALL_SENT) |
3115 BIT(USB_IN_NAK_SENT) |
3116 BIT(USB_IN_ACK_RCVD) |
3117 BIT(USB_OUT_PING_NAK_SENT) |
3118 BIT(USB_OUT_ACK_SENT) |
3119 BIT(SHORT_PACKET_OUT_DONE_INTERRUPT) |
3120 BIT(SHORT_PACKET_TRANSFERRED_INTERRUPT) |
3121 BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
3122 BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
3123 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
ae8e530a
RRD
3124 BIT(DATA_IN_TOKEN_INTERRUPT),
3125 &ep->regs->ep_stat);
adc82f77
RRD
3126 }
3127 u.raw[0] = readl(&dev->usb->setup0123);
3128 u.raw[1] = readl(&dev->usb->setup4567);
901b3d75 3129
fae3c158
RRD
3130 cpu_to_le32s(&u.raw[0]);
3131 cpu_to_le32s(&u.raw[1]);
1da177e4 3132
2eeb0016 3133 if (dev->quirks & PLX_SUPERSPEED)
adc82f77
RRD
3134 defect7374_workaround(dev, u.r);
3135
950ee4c8
GL
3136 tmp = 0;
3137
01ee7d70
DB
3138#define w_value le16_to_cpu(u.r.wValue)
3139#define w_index le16_to_cpu(u.r.wIndex)
3140#define w_length le16_to_cpu(u.r.wLength)
1da177e4
LT
3141
3142 /* ack the irq */
3e76fdcb
RRD
3143 writel(BIT(SETUP_PACKET_INTERRUPT), &dev->regs->irqstat0);
3144 stat ^= BIT(SETUP_PACKET_INTERRUPT);
1da177e4
LT
3145
3146 /* watch control traffic at the token level, and force
3147 * synchronization before letting the status stage happen.
3148 * FIXME ignore tokens we'll NAK, until driver responds.
3149 * that'll mean a lot less irqs for some drivers.
3150 */
3151 ep->is_in = (u.r.bRequestType & USB_DIR_IN) != 0;
3152 if (ep->is_in) {
3e76fdcb
RRD
3153 scratch = BIT(DATA_PACKET_TRANSMITTED_INTERRUPT) |
3154 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
3155 BIT(DATA_IN_TOKEN_INTERRUPT);
fae3c158 3156 stop_out_naking(ep);
1da177e4 3157 } else
3e76fdcb
RRD
3158 scratch = BIT(DATA_PACKET_RECEIVED_INTERRUPT) |
3159 BIT(DATA_OUT_PING_TOKEN_INTERRUPT) |
3160 BIT(DATA_IN_TOKEN_INTERRUPT);
fae3c158 3161 writel(scratch, &dev->epregs[0].ep_irqenb);
1da177e4
LT
3162
3163 /* we made the hardware handle most lowlevel requests;
3164 * everything else goes uplevel to the gadget code.
3165 */
1f26e28d 3166 ep->responded = 1;
adc82f77
RRD
3167
3168 if (dev->gadget.speed == USB_SPEED_SUPER) {
3169 handle_stat0_irqs_superspeed(dev, ep, u.r);
3170 goto next_endpoints;
3171 }
3172
1da177e4
LT
3173 switch (u.r.bRequest) {
3174 case USB_REQ_GET_STATUS: {
3175 struct net2280_ep *e;
320f3459 3176 __le32 status;
1da177e4
LT
3177
3178 /* hw handles device and interface status */
3179 if (u.r.bRequestType != (USB_DIR_IN|USB_RECIP_ENDPOINT))
3180 goto delegate;
fae3c158
RRD
3181 e = get_ep_by_addr(dev, w_index);
3182 if (!e || w_length > 2)
1da177e4
LT
3183 goto do_stall;
3184
3e76fdcb 3185 if (readl(&e->regs->ep_rsp) & BIT(SET_ENDPOINT_HALT))
fae3c158 3186 status = cpu_to_le32(1);
1da177e4 3187 else
fae3c158 3188 status = cpu_to_le32(0);
1da177e4
LT
3189
3190 /* don't bother with a request object! */
fae3c158
RRD
3191 writel(0, &dev->epregs[0].ep_irqenb);
3192 set_fifo_bytecount(ep, w_length);
3193 writel((__force u32)status, &dev->epregs[0].ep_data);
3194 allow_status(ep);
e56e69cc 3195 ep_vdbg(dev, "%s stat %02x\n", ep->ep.name, status);
1da177e4
LT
3196 goto next_endpoints;
3197 }
3198 break;
3199 case USB_REQ_CLEAR_FEATURE: {
3200 struct net2280_ep *e;
3201
3202 /* hw handles device features */
3203 if (u.r.bRequestType != USB_RECIP_ENDPOINT)
3204 goto delegate;
ae8e530a 3205 if (w_value != USB_ENDPOINT_HALT || w_length != 0)
1da177e4 3206 goto do_stall;
fae3c158
RRD
3207 e = get_ep_by_addr(dev, w_index);
3208 if (!e)
1da177e4 3209 goto do_stall;
8066134f 3210 if (e->wedged) {
e56e69cc 3211 ep_vdbg(dev, "%s wedged, halt not cleared\n",
8066134f
AS
3212 ep->ep.name);
3213 } else {
e56e69cc 3214 ep_vdbg(dev, "%s clear halt\n", e->ep.name);
8066134f 3215 clear_halt(e);
2eeb0016 3216 if ((ep->dev->quirks & PLX_SUPERSPEED) &&
adc82f77
RRD
3217 !list_empty(&e->queue) && e->td_dma)
3218 restart_dma(e);
8066134f 3219 }
fae3c158 3220 allow_status(ep);
1da177e4
LT
3221 goto next_endpoints;
3222 }
3223 break;
3224 case USB_REQ_SET_FEATURE: {
3225 struct net2280_ep *e;
3226
3227 /* hw handles device features */
3228 if (u.r.bRequestType != USB_RECIP_ENDPOINT)
3229 goto delegate;
ae8e530a 3230 if (w_value != USB_ENDPOINT_HALT || w_length != 0)
1da177e4 3231 goto do_stall;
fae3c158
RRD
3232 e = get_ep_by_addr(dev, w_index);
3233 if (!e)
1da177e4 3234 goto do_stall;
8066134f
AS
3235 if (e->ep.name == ep0name)
3236 goto do_stall;
fae3c158 3237 set_halt(e);
2eeb0016 3238 if ((dev->quirks & PLX_SUPERSPEED) && e->dma)
adc82f77 3239 abort_dma(e);
fae3c158 3240 allow_status(ep);
e56e69cc 3241 ep_vdbg(dev, "%s set halt\n", ep->ep.name);
1da177e4
LT
3242 goto next_endpoints;
3243 }
3244 break;
3245 default:
3246delegate:
e56e69cc 3247 ep_vdbg(dev, "setup %02x.%02x v%04x i%04x l%04x "
1da177e4
LT
3248 "ep_cfg %08x\n",
3249 u.r.bRequestType, u.r.bRequest,
320f3459 3250 w_value, w_index, w_length,
adc82f77 3251 readl(&ep->cfg->ep_cfg));
1f26e28d 3252 ep->responded = 0;
fae3c158
RRD
3253 spin_unlock(&dev->lock);
3254 tmp = dev->driver->setup(&dev->gadget, &u.r);
3255 spin_lock(&dev->lock);
1da177e4
LT
3256 }
3257
3258 /* stall ep0 on error */
3259 if (tmp < 0) {
3260do_stall:
e56e69cc 3261 ep_vdbg(dev, "req %02x.%02x protocol STALL; stat %d\n",
1da177e4
LT
3262 u.r.bRequestType, u.r.bRequest, tmp);
3263 dev->protocol_stall = 1;
3264 }
3265
3266 /* some in/out token irq should follow; maybe stall then.
3267 * driver must queue a request (even zlp) or halt ep0
3268 * before the host times out.
3269 */
3270 }
3271
320f3459
DB
3272#undef w_value
3273#undef w_index
3274#undef w_length
3275
1da177e4
LT
3276next_endpoints:
3277 /* endpoint data irq ? */
3278 scratch = stat & 0x7f;
3279 stat &= ~0x7f;
3280 for (num = 0; scratch; num++) {
3281 u32 t;
3282
3283 /* do this endpoint's FIFO and queue need tending? */
3e76fdcb 3284 t = BIT(num);
1da177e4
LT
3285 if ((scratch & t) == 0)
3286 continue;
3287 scratch ^= t;
3288
fae3c158
RRD
3289 ep = &dev->ep[num];
3290 handle_ep_small(ep);
1da177e4
LT
3291 }
3292
3293 if (stat)
e56e69cc 3294 ep_dbg(dev, "unhandled irqstat0 %08x\n", stat);
1da177e4
LT
3295}
3296
3e76fdcb
RRD
3297#define DMA_INTERRUPTS (BIT(DMA_D_INTERRUPT) | \
3298 BIT(DMA_C_INTERRUPT) | \
3299 BIT(DMA_B_INTERRUPT) | \
3300 BIT(DMA_A_INTERRUPT))
1da177e4 3301#define PCI_ERROR_INTERRUPTS ( \
3e76fdcb
RRD
3302 BIT(PCI_MASTER_ABORT_RECEIVED_INTERRUPT) | \
3303 BIT(PCI_TARGET_ABORT_RECEIVED_INTERRUPT) | \
3304 BIT(PCI_RETRY_ABORT_INTERRUPT))
1da177e4 3305
fae3c158 3306static void handle_stat1_irqs(struct net2280 *dev, u32 stat)
1da177e4
LT
3307{
3308 struct net2280_ep *ep;
3309 u32 tmp, num, mask, scratch;
3310
3311 /* after disconnect there's nothing else to do! */
3e76fdcb
RRD
3312 tmp = BIT(VBUS_INTERRUPT) | BIT(ROOT_PORT_RESET_INTERRUPT);
3313 mask = BIT(SUPER_SPEED) | BIT(HIGH_SPEED) | BIT(FULL_SPEED);
1da177e4
LT
3314
3315 /* VBUS disconnect is indicated by VBUS_PIN and VBUS_INTERRUPT set.
fb914ebf 3316 * Root Port Reset is indicated by ROOT_PORT_RESET_INTERRUPT set and
901b3d75 3317 * both HIGH_SPEED and FULL_SPEED clear (as ROOT_PORT_RESET_INTERRUPT
1da177e4
LT
3318 * only indicates a change in the reset state).
3319 */
3320 if (stat & tmp) {
fae3c158 3321 writel(tmp, &dev->regs->irqstat1);
ae8e530a 3322 if ((((stat & BIT(ROOT_PORT_RESET_INTERRUPT)) &&
ac9d032e 3323 ((readl(&dev->usb->usbstat) & mask) == 0)) ||
ae8e530a
RRD
3324 ((readl(&dev->usb->usbctl) &
3325 BIT(VBUS_PIN)) == 0)) &&
3326 (dev->gadget.speed != USB_SPEED_UNKNOWN)) {
e56e69cc 3327 ep_dbg(dev, "disconnect %s\n",
1da177e4 3328 dev->driver->driver.name);
fae3c158
RRD
3329 stop_activity(dev, dev->driver);
3330 ep0_start(dev);
1da177e4
LT
3331 return;
3332 }
3333 stat &= ~tmp;
3334
3335 /* vBUS can bounce ... one of many reasons to ignore the
3336 * notion of hotplug events on bus connect/disconnect!
3337 */
3338 if (!stat)
3339 return;
3340 }
3341
3342 /* NOTE: chip stays in PCI D0 state for now, but it could
3343 * enter D1 to save more power
3344 */
3e76fdcb 3345 tmp = BIT(SUSPEND_REQUEST_CHANGE_INTERRUPT);
1da177e4 3346 if (stat & tmp) {
fae3c158 3347 writel(tmp, &dev->regs->irqstat1);
3e76fdcb 3348 if (stat & BIT(SUSPEND_REQUEST_INTERRUPT)) {
1da177e4 3349 if (dev->driver->suspend)
fae3c158 3350 dev->driver->suspend(&dev->gadget);
1da177e4 3351 if (!enable_suspend)
3e76fdcb 3352 stat &= ~BIT(SUSPEND_REQUEST_INTERRUPT);
1da177e4
LT
3353 } else {
3354 if (dev->driver->resume)
fae3c158 3355 dev->driver->resume(&dev->gadget);
1da177e4
LT
3356 /* at high speed, note erratum 0133 */
3357 }
3358 stat &= ~tmp;
3359 }
3360
3361 /* clear any other status/irqs */
3362 if (stat)
fae3c158 3363 writel(stat, &dev->regs->irqstat1);
1da177e4
LT
3364
3365 /* some status we can just ignore */
2eeb0016 3366 if (dev->quirks & PLX_2280)
3e76fdcb
RRD
3367 stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) |
3368 BIT(SUSPEND_REQUEST_INTERRUPT) |
3369 BIT(RESUME_INTERRUPT) |
3370 BIT(SOF_INTERRUPT));
950ee4c8 3371 else
3e76fdcb
RRD
3372 stat &= ~(BIT(CONTROL_STATUS_INTERRUPT) |
3373 BIT(RESUME_INTERRUPT) |
3374 BIT(SOF_DOWN_INTERRUPT) |
3375 BIT(SOF_INTERRUPT));
950ee4c8 3376
1da177e4
LT
3377 if (!stat)
3378 return;
e56e69cc 3379 /* ep_dbg(dev, "irqstat1 %08x\n", stat);*/
1da177e4
LT
3380
3381 /* DMA status, for ep-{a,b,c,d} */
3382 scratch = stat & DMA_INTERRUPTS;
3383 stat &= ~DMA_INTERRUPTS;
3384 scratch >>= 9;
3385 for (num = 0; scratch; num++) {
3386 struct net2280_dma_regs __iomem *dma;
3387
3e76fdcb 3388 tmp = BIT(num);
1da177e4
LT
3389 if ((tmp & scratch) == 0)
3390 continue;
3391 scratch ^= tmp;
3392
fae3c158 3393 ep = &dev->ep[num + 1];
1da177e4
LT
3394 dma = ep->dma;
3395
3396 if (!dma)
3397 continue;
3398
3399 /* clear ep's dma status */
fae3c158
RRD
3400 tmp = readl(&dma->dmastat);
3401 writel(tmp, &dma->dmastat);
1da177e4 3402
adc82f77 3403 /* dma sync */
2eeb0016 3404 if (dev->quirks & PLX_SUPERSPEED) {
adc82f77
RRD
3405 u32 r_dmacount = readl(&dma->dmacount);
3406 if (!ep->is_in && (r_dmacount & 0x00FFFFFF) &&
3e76fdcb 3407 (tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT)))
adc82f77
RRD
3408 continue;
3409 }
3410
1da177e4
LT
3411 /* chaining should stop on abort, short OUT from fifo,
3412 * or (stat0 codepath) short OUT transfer.
3413 */
3414 if (!use_dma_chaining) {
3e76fdcb 3415 if (!(tmp & BIT(DMA_TRANSACTION_DONE_INTERRUPT))) {
e56e69cc 3416 ep_dbg(ep->dev, "%s no xact done? %08x\n",
1da177e4
LT
3417 ep->ep.name, tmp);
3418 continue;
3419 }
fae3c158 3420 stop_dma(ep->dma);
1da177e4
LT
3421 }
3422
3423 /* OUT transfers terminate when the data from the
3424 * host is in our memory. Process whatever's done.
3425 * On this path, we know transfer's last packet wasn't
3426 * less than req->length. NAK_OUT_PACKETS may be set,
3427 * or the FIFO may already be holding new packets.
3428 *
3429 * IN transfers can linger in the FIFO for a very
3430 * long time ... we ignore that for now, accounting
3431 * precisely (like PIO does) needs per-packet irqs
3432 */
fae3c158 3433 scan_dma_completions(ep);
1da177e4
LT
3434
3435 /* disable dma on inactive queues; else maybe restart */
fae3c158 3436 if (list_empty(&ep->queue)) {
1da177e4 3437 if (use_dma_chaining)
fae3c158 3438 stop_dma(ep->dma);
1da177e4 3439 } else {
fae3c158 3440 tmp = readl(&dma->dmactl);
3e76fdcb 3441 if (!use_dma_chaining || (tmp & BIT(DMA_ENABLE)) == 0)
fae3c158 3442 restart_dma(ep);
1da177e4
LT
3443 else if (ep->is_in && use_dma_chaining) {
3444 struct net2280_request *req;
320f3459 3445 __le32 dmacount;
1da177e4
LT
3446
3447 /* the descriptor at the head of the chain
3448 * may still have VALID_BIT clear; that's
3449 * used to trigger changing DMA_FIFO_VALIDATE
3450 * (affects automagic zlp writes).
3451 */
fae3c158 3452 req = list_entry(ep->queue.next,
3453 struct net2280_request, queue);
3454 dmacount = req->td->dmacount;
3455 dmacount &= cpu_to_le32(BIT(VALID_BIT) |
3456 DMA_BYTE_COUNT_MASK);
1da177e4 3457 if (dmacount && (dmacount & valid_bit) == 0)
fae3c158 3458 restart_dma(ep);
3459 }
3460 }
3461 ep->irqs++;
3462 }
3463
3464 /* NOTE: there are other PCI errors we might usefully notice.
3465 	 * If they appear very often, here's where to try recovering.
3466 */
3467 if (stat & PCI_ERROR_INTERRUPTS) {
e56e69cc 3468 ep_err(dev, "pci dma error; stat %08x\n", stat);
3469 stat &= ~PCI_ERROR_INTERRUPTS;
3470 /* these are fatal errors, but "maybe" they won't
3471 * happen again ...
3472 */
3473 stop_activity(dev, dev->driver);
3474 ep0_start(dev);
3475 stat = 0;
3476 }
3477
3478 if (stat)
e56e69cc 3479 ep_dbg(dev, "unhandled irqstat1 %08x\n", stat);
3480}
3481
fae3c158 3482static irqreturn_t net2280_irq(int irq, void *_dev)
3483{
3484 struct net2280 *dev = _dev;
3485
658ad5e0 3486 /* shared interrupt, not ours */
2eeb0016 3487 if ((dev->quirks & PLX_LEGACY) &&
3e76fdcb 3488 (!(readl(&dev->regs->irqstat0) & BIT(INTA_ASSERTED))))
3489 return IRQ_NONE;
3490
fae3c158 3491 spin_lock(&dev->lock);
3492
3493 /* handle disconnect, dma, and more */
fae3c158 3494 handle_stat1_irqs(dev, readl(&dev->regs->irqstat1));
3495
3496 /* control requests and PIO */
fae3c158 3497 handle_stat0_irqs(dev, readl(&dev->regs->irqstat0));
1da177e4 3498
2eeb0016 3499 if (dev->quirks & PLX_SUPERSPEED) {
3500 		/* briefly drop and restore the interrupt enable so any status still pending raises a new interrupt */
3501 u32 pciirqenb1 = readl(&dev->regs->pciirqenb1);
3502 writel(pciirqenb1 & 0x7FFFFFFF, &dev->regs->pciirqenb1);
3503 writel(pciirqenb1, &dev->regs->pciirqenb1);
3504 }
3505
fae3c158 3506 spin_unlock(&dev->lock);
3507
3508 return IRQ_HANDLED;
3509}
3510
3511/*-------------------------------------------------------------------------*/
3512
fae3c158 3513static void gadget_release(struct device *_dev)
1da177e4 3514{
fae3c158 3515 struct net2280 *dev = dev_get_drvdata(_dev);
1da177e4 3516
fae3c158 3517 kfree(dev);
3518}
3519
3520/* tear down the binding between this driver and the pci device */
3521
fae3c158 3522static void net2280_remove(struct pci_dev *pdev)
1da177e4 3523{
fae3c158 3524 struct net2280 *dev = pci_get_drvdata(pdev);
1da177e4 3525
3526 usb_del_gadget_udc(&dev->gadget);
3527
6bea476c 3528 BUG_ON(dev->driver);
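	/* usb_del_gadget_udc() above unbinds any gadget driver,
	 * so dev->driver should already be NULL by the time we get here
	 */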
3529
3530 /* then clean up the resources we allocated during probe() */
fae3c158 3531 net2280_led_shutdown(dev);
3532 if (dev->requests) {
3533 int i;
3534 for (i = 1; i < 5; i++) {
fae3c158 3535 if (!dev->ep[i].dummy)
1da177e4 3536 continue;
3537 pci_pool_free(dev->requests, dev->ep[i].dummy,
3538 dev->ep[i].td_dma);
1da177e4 3539 }
fae3c158 3540 pci_pool_destroy(dev->requests);
3541 }
3542 if (dev->got_irq)
fae3c158 3543 free_irq(pdev->irq, dev);
2eeb0016 3544 if (use_msi && dev->quirks & PLX_SUPERSPEED)
adc82f77 3545 pci_disable_msi(pdev);
1da177e4 3546 if (dev->regs)
fae3c158 3547 iounmap(dev->regs);
1da177e4 3548 if (dev->region)
3549 release_mem_region(pci_resource_start(pdev, 0),
3550 pci_resource_len(pdev, 0));
1da177e4 3551 if (dev->enabled)
3552 pci_disable_device(pdev);
3553 device_remove_file(&pdev->dev, &dev_attr_registers);
1da177e4 3554
e56e69cc 3555 ep_info(dev, "unbind\n");
3556}
3557
3558/* wrap this driver around the specified device, but
3559 * don't respond over USB until a gadget driver binds to us.
3560 */
3561
fae3c158 3562static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id)
3563{
3564 struct net2280 *dev;
3565 unsigned long resource, len;
3566 void __iomem *base = NULL;
3567 int retval, i;
1da177e4 3568
3569 if (!use_dma)
3570 use_dma_chaining = 0;
3571
1da177e4 3572 /* alloc, and start init */
3573 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3574 if (dev == NULL) {
3575 retval = -ENOMEM;
3576 goto done;
3577 }
3578
3579 pci_set_drvdata(pdev, dev);
3580 spin_lock_init(&dev->lock);
2eeb0016 3581 dev->quirks = id->driver_data;
3582 dev->pdev = pdev;
3583 dev->gadget.ops = &net2280_ops;
2eeb0016 3584 dev->gadget.max_speed = (dev->quirks & PLX_SUPERSPEED) ?
adc82f77 3585 USB_SPEED_SUPER : USB_SPEED_HIGH;
3586
3587 /* the "gadget" abstracts/virtualizes the controller */
3588 dev->gadget.name = driver_name;
3589
3590 /* now all the pci goodies ... */
3591 if (pci_enable_device(pdev) < 0) {
3592 retval = -ENODEV;
3593 goto done;
3594 }
3595 dev->enabled = 1;
3596
3597 /* BAR 0 holds all the registers
3598 * BAR 1 is 8051 memory; unused here (note erratum 0103)
3599 * BAR 2 is fifo memory; unused here
3600 */
3601 resource = pci_resource_start(pdev, 0);
3602 len = pci_resource_len(pdev, 0);
3603 if (!request_mem_region(resource, len, driver_name)) {
e56e69cc 3604 ep_dbg(dev, "controller already in use\n");
3605 retval = -EBUSY;
3606 goto done;
3607 }
3608 dev->region = 1;
3609
3610 /* FIXME provide firmware download interface to put
3611 * 8051 code into the chip, e.g. to turn on PCI PM.
3612 */
3613
fae3c158 3614 base = ioremap_nocache(resource, len);
1da177e4 3615 if (base == NULL) {
e56e69cc 3616 ep_dbg(dev, "can't map memory\n");
3617 retval = -EFAULT;
3618 goto done;
3619 }
3620 dev->regs = (struct net2280_regs __iomem *) base;
3621 dev->usb = (struct net2280_usb_regs __iomem *) (base + 0x0080);
3622 dev->pci = (struct net2280_pci_regs __iomem *) (base + 0x0100);
3623 dev->dma = (struct net2280_dma_regs __iomem *) (base + 0x0180);
3624 dev->dep = (struct net2280_dep_regs __iomem *) (base + 0x0200);
3625 dev->epregs = (struct net2280_ep_regs __iomem *) (base + 0x0300);
3626
2eeb0016 3627 if (dev->quirks & PLX_SUPERSPEED) {
3628 u32 fsmvalue;
3629 u32 usbstat;
3630 dev->usb_ext = (struct usb338x_usb_ext_regs __iomem *)
3631 (base + 0x00b4);
3632 dev->fiforegs = (struct usb338x_fifo_regs __iomem *)
3633 (base + 0x0500);
3634 dev->llregs = (struct usb338x_ll_regs __iomem *)
3635 (base + 0x0700);
3636 dev->ll_lfps_regs = (struct usb338x_ll_lfps_regs __iomem *)
3637 (base + 0x0748);
3638 dev->ll_tsn_regs = (struct usb338x_ll_tsn_regs __iomem *)
3639 (base + 0x077c);
3640 dev->ll_chicken_reg = (struct usb338x_ll_chi_regs __iomem *)
3641 (base + 0x079c);
3642 dev->plregs = (struct usb338x_pl_regs __iomem *)
3643 (base + 0x0800);
3644 usbstat = readl(&dev->usb->usbstat);
fae3c158 3645 dev->enhanced_mode = !!(usbstat & BIT(11));
3646 dev->n_ep = (dev->enhanced_mode) ? 9 : 5;
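		/* i.e. ep0 plus eight endpoints in enhanced mode,
		 * ep0 plus four otherwise
		 */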
3647 /* put into initial config, link up all endpoints */
3648 fsmvalue = get_idx_reg(dev->regs, SCRATCH) &
3649 (0xf << DEFECT7374_FSM_FIELD);
3650 /* See if firmware needs to set up for workaround: */
3651 if (fsmvalue == DEFECT7374_FSM_SS_CONTROL_READ)
3652 writel(0, &dev->usb->usbctl);
3653 	} else {
3654 dev->enhanced_mode = 0;
3655 dev->n_ep = 7;
3656 /* put into initial config, link up all endpoints */
3657 writel(0, &dev->usb->usbctl);
3658 }
3659
3660 usb_reset(dev);
3661 usb_reinit(dev);
3662
3663 /* irq setup after old hardware is cleaned up */
3664 if (!pdev->irq) {
e56e69cc 3665 ep_err(dev, "No IRQ. Check PCI setup!\n");
3666 retval = -ENODEV;
3667 goto done;
3668 }
c6387a48 3669
2eeb0016 3670 if (use_msi && (dev->quirks & PLX_SUPERSPEED))
adc82f77 3671 if (pci_enable_msi(pdev))
e56e69cc 3672 ep_err(dev, "Failed to enable MSI mode\n");
adc82f77 3673
3674 if (request_irq(pdev->irq, net2280_irq, IRQF_SHARED,
3675 driver_name, dev)) {
e56e69cc 3676 ep_err(dev, "request interrupt %d failed\n", pdev->irq);
3677 retval = -EBUSY;
3678 goto done;
3679 }
3680 dev->got_irq = 1;
3681
3682 /* DMA setup */
3683 /* NOTE: we know only the 32 LSBs of dma addresses may be nonzero */
3684 dev->requests = pci_pool_create("requests", pdev,
3685 sizeof(struct net2280_dma),
3686 0 /* no alignment requirements */,
3687 0 /* or page-crossing issues */);
3688 if (!dev->requests) {
e56e69cc 3689 ep_dbg(dev, "can't get request pool\n");
3690 retval = -ENOMEM;
3691 goto done;
3692 }
3693 for (i = 1; i < 5; i++) {
3694 struct net2280_dma *td;
3695
3696 td = pci_pool_alloc(dev->requests, GFP_KERNEL,
3697 &dev->ep[i].td_dma);
1da177e4 3698 if (!td) {
e56e69cc 3699 ep_dbg(dev, "can't get dummy %d\n", i);
3700 retval = -ENOMEM;
3701 goto done;
3702 }
3703 td->dmacount = 0; /* not VALID */
1da177e4 3704 td->dmadesc = td->dmaaddr;
fae3c158 3705 dev->ep[i].dummy = td;
3706 }
3707
3708 /* enable lower-overhead pci memory bursts during DMA */
2eeb0016 3709 if (dev->quirks & PLX_LEGACY)
3710 writel(BIT(DMA_MEMORY_WRITE_AND_INVALIDATE_ENABLE) |
3711 /*
3712 * 256 write retries may not be enough...
3713 BIT(PCI_RETRY_ABORT_ENABLE) |
3714 */
3715 BIT(DMA_READ_MULTIPLE_ENABLE) |
3716 BIT(DMA_READ_LINE_ENABLE),
3717 &dev->pci->pcimstctl);
1da177e4 3718 /* erratum 0115 shouldn't appear: Linux inits PCI_LATENCY_TIMER */
3719 pci_set_master(pdev);
3720 pci_try_set_mwi(pdev);
3721
3722 /* ... also flushes any posted pci writes */
fae3c158 3723 dev->chiprev = get_idx_reg(dev->regs, REG_CHIPREV) & 0xffff;
3724
3725 /* done */
3726 ep_info(dev, "%s\n", driver_desc);
3727 ep_info(dev, "irq %d, pci mem %p, chip rev %04x\n",
c6387a48 3728 pdev->irq, base, dev->chiprev);
e56e69cc 3729 ep_info(dev, "version: " DRIVER_VERSION "; dma %s %s\n",
3730 use_dma ? (use_dma_chaining ? "chaining" : "enabled")
3731 : "disabled",
3732 dev->enhanced_mode ? "enhanced mode" : "legacy mode");
3733 retval = device_create_file(&pdev->dev, &dev_attr_registers);
3734 if (retval)
3735 goto done;
1da177e4 3736
3737 retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget,
3738 gadget_release);
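	/* gadget_release() will kfree(dev) once the gadget device's
	 * last reference is dropped
	 */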
3739 if (retval)
3740 goto done;
3741 return 0;
3742
3743done:
3744 if (dev)
fae3c158 3745 net2280_remove(pdev);
3746 return retval;
3747}
3748
3749/* make sure the board is quiescent; otherwise it will continue
3750 * generating IRQs across the upcoming reboot.
3751 */
3752
fae3c158 3753static void net2280_shutdown(struct pci_dev *pdev)
2d61bde7 3754{
fae3c158 3755 struct net2280 *dev = pci_get_drvdata(pdev);
3756
3757 /* disable IRQs */
3758 writel(0, &dev->regs->pciirqenb0);
3759 writel(0, &dev->regs->pciirqenb1);
3760
3761 /* disable the pullup so the host will think we're gone */
fae3c158 3762 writel(0, &dev->usb->usbctl);
3763
3764 /* Disable full-speed test mode */
2eeb0016 3765 if (dev->quirks & PLX_LEGACY)
adc82f77 3766 writel(0, &dev->usb->xcvrdiag);
3767}
3768
3769
3770/*-------------------------------------------------------------------------*/
3771
fae3c158 3772static const struct pci_device_id pci_ids[] = { {
3773 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
3774 .class_mask = ~0,
c2db8a8a 3775 .vendor = PCI_VENDOR_ID_PLX_LEGACY,
3776 .device = 0x2280,
3777 .subvendor = PCI_ANY_ID,
3778 .subdevice = PCI_ANY_ID,
2eeb0016 3779 .driver_data = PLX_LEGACY | PLX_2280,
ae8e530a 3780 }, {
3781 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
3782 .class_mask = ~0,
c2db8a8a 3783 .vendor = PCI_VENDOR_ID_PLX_LEGACY,
3784 .device = 0x2282,
3785 .subvendor = PCI_ANY_ID,
3786 .subdevice = PCI_ANY_ID,
2eeb0016 3787 .driver_data = PLX_LEGACY,
ae8e530a 3788 },
adc82f77 3789 {
3790 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
3791 .class_mask = ~0,
3792 .vendor = PCI_VENDOR_ID_PLX,
3793 .device = 0x3380,
3794 .subvendor = PCI_ANY_ID,
3795 .subdevice = PCI_ANY_ID,
2eeb0016 3796 .driver_data = PLX_SUPERSPEED,
3797 },
3798 {
3799 .class = ((PCI_CLASS_SERIAL_USB << 8) | 0xfe),
3800 .class_mask = ~0,
3801 .vendor = PCI_VENDOR_ID_PLX,
3802 .device = 0x3382,
3803 .subvendor = PCI_ANY_ID,
3804 .subdevice = PCI_ANY_ID,
2eeb0016 3805 .driver_data = PLX_SUPERSPEED,
3806 },
3807{ /* end: all zeroes */ }
1da177e4 3808};
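/* .driver_data carries the PLX_* quirk flags; probe() copies them into dev->quirks */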
fae3c158 3809MODULE_DEVICE_TABLE(pci, pci_ids);
3810
3811/* pci driver glue; this is a "new style" PCI driver module */
3812static struct pci_driver net2280_pci_driver = {
3813 .name = (char *) driver_name,
3814 .id_table = pci_ids,
3815
3816 .probe = net2280_probe,
3817 .remove = net2280_remove,
2d61bde7 3818 .shutdown = net2280_shutdown,
3819
3820 /* FIXME add power management support */
3821};
3822
3823module_pci_driver(net2280_pci_driver);
3824
3825MODULE_DESCRIPTION(DRIVER_DESC);
3826MODULE_AUTHOR("David Brownell");
3827MODULE_LICENSE("GPL");