Commit | Line | Data |
---|---|---|
5fd54ace | 1 | // SPDX-License-Identifier: GPL-2.0+ |
b92a78e5 RG | 2 | /* |
3 | * Copyright (c) 2008 Rodolfo Giometti <giometti@linux.it> | |
4 | * Copyright (c) 2008 Eurotech S.p.A. <info@eurtech.it> | |
5 | * | |
6 | * This code is *strongly* based on EHCI-HCD code by David Brownell since | |
7 | * the chip is a quasi-EHCI compatible. | |
b92a78e5 RG | 8 | */ |
9 | ||
10 | #include <linux/module.h> | |
11 | #include <linux/pci.h> | |
12 | #include <linux/dmapool.h> | |
13 | #include <linux/kernel.h> | |
14 | #include <linux/delay.h> | |
15 | #include <linux/ioport.h> | |
16 | #include <linux/sched.h> | |
17 | #include <linux/slab.h> | |
18 | #include <linux/errno.h> | |
b92a78e5 RG | 19 | #include <linux/timer.h> |
20 | #include <linux/list.h> | |
21 | #include <linux/interrupt.h> | |
b92a78e5 | 22 | #include <linux/usb.h> |
27729aad | 23 | #include <linux/usb/hcd.h> |
b92a78e5 RG | 24 | #include <linux/moduleparam.h> |
25 | #include <linux/dma-mapping.h> | |
26 | #include <linux/io.h> | |
27 | ||
b92a78e5 | 28 | #include <asm/irq.h> |
b92a78e5 RG | 29 | #include <asm/unaligned.h> |
30 | ||
31 | #include <linux/irq.h> | |
32 | #include <linux/platform_device.h> | |
33 | ||
34 | #include "oxu210hp.h" | |
35 | ||
36 | #define DRIVER_VERSION "0.0.50" | |
37 | ||
38 | /* | |
39 | * Main defines | |
40 | */ | |
41 | ||
42 | #define oxu_dbg(oxu, fmt, args...) \ | |
43 | dev_dbg(oxu_to_hcd(oxu)->self.controller , fmt , ## args) | |
44 | #define oxu_err(oxu, fmt, args...) \ | |
45 | dev_err(oxu_to_hcd(oxu)->self.controller , fmt , ## args) | |
46 | #define oxu_info(oxu, fmt, args...) \ | |
47 | dev_info(oxu_to_hcd(oxu)->self.controller , fmt , ## args) | |
48 | ||
1c20163d ON | 49 | #ifdef CONFIG_DYNAMIC_DEBUG |
50 | #define DEBUG | |
51 | #endif | |
52 | ||
b92a78e5 RG | 53 | static inline struct usb_hcd *oxu_to_hcd(struct oxu_hcd *oxu) |
54 | { | |
55 | return container_of((void *) oxu, struct usb_hcd, hcd_priv); | |
56 | } | |
57 | ||
58 | static inline struct oxu_hcd *hcd_to_oxu(struct usb_hcd *hcd) | |
59 | { | |
60 | return (struct oxu_hcd *) (hcd->hcd_priv); | |
61 | } | |
62 | ||
63 | /* | |
64 | * Debug stuff | |
65 | */ | |
66 | ||
67 | #undef OXU_URB_TRACE | |
68 | #undef OXU_VERBOSE_DEBUG | |
69 | ||
70 | #ifdef OXU_VERBOSE_DEBUG | |
71 | #define oxu_vdbg oxu_dbg | |
72 | #else | |
73 | #define oxu_vdbg(oxu, fmt, args...) /* Nop */ | |
74 | #endif | |
75 | ||
76 | #ifdef DEBUG | |
77 | ||
78 | static int __attribute__((__unused__)) | |
79 | dbg_status_buf(char *buf, unsigned len, const char *label, u32 status) | |
80 | { | |
81 | return scnprintf(buf, len, "%s%sstatus %04x%s%s%s%s%s%s%s%s%s%s", | |
82 | label, label[0] ? " " : "", status, | |
83 | (status & STS_ASS) ? " Async" : "", | |
84 | (status & STS_PSS) ? " Periodic" : "", | |
85 | (status & STS_RECL) ? " Recl" : "", | |
86 | (status & STS_HALT) ? " Halt" : "", | |
87 | (status & STS_IAA) ? " IAA" : "", | |
88 | (status & STS_FATAL) ? " FATAL" : "", | |
89 | (status & STS_FLR) ? " FLR" : "", | |
90 | (status & STS_PCD) ? " PCD" : "", | |
91 | (status & STS_ERR) ? " ERR" : "", | |
92 | (status & STS_INT) ? " INT" : "" | |
93 | ); | |
94 | } | |
95 | ||
96 | static int __attribute__((__unused__)) | |
97 | dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable) | |
98 | { | |
99 | return scnprintf(buf, len, "%s%sintrenable %02x%s%s%s%s%s%s", | |
100 | label, label[0] ? " " : "", enable, | |
101 | (enable & STS_IAA) ? " IAA" : "", | |
102 | (enable & STS_FATAL) ? " FATAL" : "", | |
103 | (enable & STS_FLR) ? " FLR" : "", | |
104 | (enable & STS_PCD) ? " PCD" : "", | |
105 | (enable & STS_ERR) ? " ERR" : "", | |
106 | (enable & STS_INT) ? " INT" : "" | |
107 | ); | |
108 | } | |
109 | ||
110 | static const char *const fls_strings[] = | |
111 | { "1024", "512", "256", "??" }; | |
112 | ||
113 | static int dbg_command_buf(char *buf, unsigned len, | |
114 | const char *label, u32 command) | |
115 | { | |
116 | return scnprintf(buf, len, | |
117 | "%s%scommand %06x %s=%d ithresh=%d%s%s%s%s period=%s%s %s", | |
118 | label, label[0] ? " " : "", command, | |
119 | (command & CMD_PARK) ? "park" : "(park)", | |
120 | CMD_PARK_CNT(command), | |
121 | (command >> 16) & 0x3f, | |
122 | (command & CMD_LRESET) ? " LReset" : "", | |
123 | (command & CMD_IAAD) ? " IAAD" : "", | |
124 | (command & CMD_ASE) ? " Async" : "", | |
125 | (command & CMD_PSE) ? " Periodic" : "", | |
126 | fls_strings[(command >> 2) & 0x3], | |
127 | (command & CMD_RESET) ? " Reset" : "", | |
128 | (command & CMD_RUN) ? "RUN" : "HALT" | |
129 | ); | |
130 | } | |
131 | ||
132 | static int dbg_port_buf(char *buf, unsigned len, const char *label, | |
133 | int port, u32 status) | |
134 | { | |
135 | char *sig; | |
136 | ||
137 | /* signaling state */ | |
138 | switch (status & (3 << 10)) { | |
139 | case 0 << 10: | |
140 | sig = "se0"; | |
141 | break; | |
142 | case 1 << 10: | |
143 | sig = "k"; /* low speed */ | |
144 | break; | |
145 | case 2 << 10: | |
146 | sig = "j"; | |
147 | break; | |
148 | default: | |
149 | sig = "?"; | |
150 | break; | |
151 | } | |
152 | ||
153 | return scnprintf(buf, len, | |
154 | "%s%sport %d status %06x%s%s sig=%s%s%s%s%s%s%s%s%s%s", | |
155 | label, label[0] ? " " : "", port, status, | |
156 | (status & PORT_POWER) ? " POWER" : "", | |
157 | (status & PORT_OWNER) ? " OWNER" : "", | |
158 | sig, | |
159 | (status & PORT_RESET) ? " RESET" : "", | |
160 | (status & PORT_SUSPEND) ? " SUSPEND" : "", | |
161 | (status & PORT_RESUME) ? " RESUME" : "", | |
162 | (status & PORT_OCC) ? " OCC" : "", | |
163 | (status & PORT_OC) ? " OC" : "", | |
164 | (status & PORT_PEC) ? " PEC" : "", | |
165 | (status & PORT_PE) ? " PE" : "", | |
166 | (status & PORT_CSC) ? " CSC" : "", | |
167 | (status & PORT_CONNECT) ? " CONNECT" : "" | |
168 | ); | |
169 | } | |
170 | ||
171 | #else | |
172 | ||
173 | static inline int __attribute__((__unused__)) | |
174 | dbg_status_buf(char *buf, unsigned len, const char *label, u32 status) | |
175 | { return 0; } | |
176 | ||
177 | static inline int __attribute__((__unused__)) | |
178 | dbg_command_buf(char *buf, unsigned len, const char *label, u32 command) | |
179 | { return 0; } | |
180 | ||
181 | static inline int __attribute__((__unused__)) | |
182 | dbg_intr_buf(char *buf, unsigned len, const char *label, u32 enable) | |
183 | { return 0; } | |
184 | ||
185 | static inline int __attribute__((__unused__)) | |
186 | dbg_port_buf(char *buf, unsigned len, const char *label, int port, u32 status) | |
187 | { return 0; } | |
188 | ||
189 | #endif /* DEBUG */ | |
190 | ||
191 | /* functions have the "wrong" filename when they're output... */ | |
192 | #define dbg_status(oxu, label, status) { \ | |
193 | char _buf[80]; \ | |
194 | dbg_status_buf(_buf, sizeof _buf, label, status); \ | |
195 | oxu_dbg(oxu, "%s\n", _buf); \ | |
196 | } | |
197 | ||
198 | #define dbg_cmd(oxu, label, command) { \ | |
199 | char _buf[80]; \ | |
200 | dbg_command_buf(_buf, sizeof _buf, label, command); \ | |
201 | oxu_dbg(oxu, "%s\n", _buf); \ | |
202 | } | |
203 | ||
204 | #define dbg_port(oxu, label, port, status) { \ | |
205 | char _buf[80]; \ | |
206 | dbg_port_buf(_buf, sizeof _buf, label, port, status); \ | |
207 | oxu_dbg(oxu, "%s\n", _buf); \ | |
208 | } | |
209 | ||
210 | /* | |
211 | * Module parameters | |
212 | */ | |
213 | ||
214 | /* Initial IRQ latency: faster than hw default */ | |
215 | static int log2_irq_thresh; /* 0 to 6 */ | |
216 | module_param(log2_irq_thresh, int, S_IRUGO); | |
217 | MODULE_PARM_DESC(log2_irq_thresh, "log2 IRQ latency, 1-64 microframes"); | |
218 | ||
219 | /* Initial park setting: slower than hw default */ | |
220 | static unsigned park; | |
221 | module_param(park, uint, S_IRUGO); | |
222 | MODULE_PARM_DESC(park, "park setting; 1-3 back-to-back async packets"); | |
223 | ||
224 | /* For flakey hardware, ignore overcurrent indicators */ | |
90ab5ee9 | 225 | static bool ignore_oc; |
b92a78e5 RG | 226 | module_param(ignore_oc, bool, S_IRUGO); |
227 | MODULE_PARM_DESC(ignore_oc, "ignore bogus hardware overcurrent indications"); | |
228 | ||
229 | ||
230 | static void ehci_work(struct oxu_hcd *oxu); | |
231 | static int oxu_hub_control(struct usb_hcd *hcd, | |
232 | u16 typeReq, u16 wValue, u16 wIndex, | |
233 | char *buf, u16 wLength); | |
234 | ||
235 | /* | |
236 | * Local functions | |
237 | */ | |
238 | ||
239 | /* Low level read/write registers functions */ | |
240 | static inline u32 oxu_readl(void *base, u32 reg) | |
241 | { | |
242 | return readl(base + reg); | |
243 | } | |
244 | ||
245 | static inline void oxu_writel(void *base, u32 reg, u32 val) | |
246 | { | |
247 | writel(val, base + reg); | |
248 | } | |
249 | ||
250 | static inline void timer_action_done(struct oxu_hcd *oxu, | |
251 | enum ehci_timer_action action) | |
252 | { | |
253 | clear_bit(action, &oxu->actions); | |
254 | } | |
255 | ||
256 | static inline void timer_action(struct oxu_hcd *oxu, | |
257 | enum ehci_timer_action action) | |
258 | { | |
259 | if (!test_and_set_bit(action, &oxu->actions)) { | |
260 | unsigned long t; | |
261 | ||
262 | switch (action) { | |
263 | case TIMER_IAA_WATCHDOG: | |
264 | t = EHCI_IAA_JIFFIES; | |
265 | break; | |
266 | case TIMER_IO_WATCHDOG: | |
267 | t = EHCI_IO_JIFFIES; | |
268 | break; | |
269 | case TIMER_ASYNC_OFF: | |
270 | t = EHCI_ASYNC_JIFFIES; | |
271 | break; | |
272 | case TIMER_ASYNC_SHRINK: | |
273 | default: | |
274 | t = EHCI_SHRINK_JIFFIES; | |
275 | break; | |
276 | } | |
277 | t += jiffies; | |
278 | /* all timings except IAA watchdog can be overridden. | |
279 | * async queue SHRINK often precedes IAA. while it's ready | |
280 | * to go OFF neither can matter, and afterwards the IO | |
281 | * watchdog stops unless there's still periodic traffic. | |
282 | */ | |
283 | if (action != TIMER_IAA_WATCHDOG | |
284 | && t > oxu->watchdog.expires | |
285 | && timer_pending(&oxu->watchdog)) | |
286 | return; | |
287 | mod_timer(&oxu->watchdog, t); | |
288 | } | |
289 | } | |
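
The two helpers above multiplex several deferred actions onto the single oxu->watchdog timer. A minimal sketch of the intended calling pattern, illustrative only and not part of the file:

```c
/* the async schedule has just gone empty: turn it off later, not now */
timer_action(oxu, TIMER_ASYNC_OFF);

/* ... and if the schedule is restarted before the timer fires ... */
timer_action_done(oxu, TIMER_ASYNC_OFF);
```
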
290 | ||
291 | /* | |
292 | * handshake - spin reading hc until handshake completes or fails | |
293 | * @ptr: address of hc register to be read | |
294 | * @mask: bits to look at in result of read | |
295 | * @done: value of those bits when handshake succeeds | |
296 | * @usec: timeout in microseconds | |
297 | * | |
298 | * Returns negative errno, or zero on success | |
299 | * | |
300 | * Success happens when the "mask" bits have the specified value (hardware | |
301 | * handshake done). There are two failure modes: "usec" have passed (major | |
302 | * hardware flakeout), or the register reads as all-ones (hardware removed). | |
303 | * | |
304 | * That last failure should only happen in cases like physical cardbus eject |
305 | * before driver shutdown. But it also seems to be caused by bugs in cardbus | |
306 | * bridge shutdown: shutting down the bridge before the devices using it. | |
307 | */ | |
308 | static int handshake(struct oxu_hcd *oxu, void __iomem *ptr, | |
309 | u32 mask, u32 done, int usec) | |
310 | { | |
311 | u32 result; | |
312 | ||
313 | do { | |
314 | result = readl(ptr); | |
315 | if (result == ~(u32)0) /* card removed */ | |
316 | return -ENODEV; | |
317 | result &= mask; | |
318 | if (result == done) | |
319 | return 0; | |
320 | udelay(1); | |
321 | usec--; | |
322 | } while (usec > 0); | |
323 | return -ETIMEDOUT; | |
324 | } | |
325 | ||
326 | /* Force HC to halt state from unknown (EHCI spec section 2.3) */ | |
327 | static int ehci_halt(struct oxu_hcd *oxu) | |
328 | { | |
329 | u32 temp = readl(&oxu->regs->status); | |
330 | ||
331 | /* disable any irqs left enabled by previous code */ | |
332 | writel(0, &oxu->regs->intr_enable); | |
333 | ||
334 | if ((temp & STS_HALT) != 0) | |
335 | return 0; | |
336 | ||
337 | temp = readl(&oxu->regs->command); | |
338 | temp &= ~CMD_RUN; | |
339 | writel(temp, &oxu->regs->command); | |
340 | return handshake(oxu, &oxu->regs->status, | |
341 | STS_HALT, STS_HALT, 16 * 125); | |
342 | } | |
343 | ||
344 | /* Put TDI/ARC silicon into EHCI mode */ | |
345 | static void tdi_reset(struct oxu_hcd *oxu) | |
346 | { | |
347 | u32 __iomem *reg_ptr; | |
348 | u32 tmp; | |
349 | ||
350 | reg_ptr = (u32 __iomem *)(((u8 __iomem *)oxu->regs) + 0x68); | |
351 | tmp = readl(reg_ptr); | |
352 | tmp |= 0x3; | |
353 | writel(tmp, reg_ptr); | |
354 | } | |
355 | ||
356 | /* Reset a non-running (STS_HALT == 1) controller */ | |
357 | static int ehci_reset(struct oxu_hcd *oxu) | |
358 | { | |
359 | int retval; | |
360 | u32 command = readl(&oxu->regs->command); | |
361 | ||
362 | command |= CMD_RESET; | |
363 | dbg_cmd(oxu, "reset", command); | |
364 | writel(command, &oxu->regs->command); | |
365 | oxu_to_hcd(oxu)->state = HC_STATE_HALT; | |
366 | oxu->next_statechange = jiffies; | |
367 | retval = handshake(oxu, &oxu->regs->command, | |
368 | CMD_RESET, 0, 250 * 1000); | |
369 | ||
370 | if (retval) | |
371 | return retval; | |
372 | ||
373 | tdi_reset(oxu); | |
374 | ||
375 | return retval; | |
376 | } | |
377 | ||
378 | /* Idle the controller (from running) */ | |
379 | static void ehci_quiesce(struct oxu_hcd *oxu) | |
380 | { | |
381 | u32 temp; | |
382 | ||
383 | #ifdef DEBUG | |
debe26af | 384 | BUG_ON(!HC_IS_RUNNING(oxu_to_hcd(oxu)->state)); |
b92a78e5 RG | 385 | #endif |
386 | ||
387 | /* wait for any schedule enables/disables to take effect */ | |
388 | temp = readl(&oxu->regs->command) << 10; | |
389 | temp &= STS_ASS | STS_PSS; | |
390 | if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS, | |
391 | temp, 16 * 125) != 0) { | |
392 | oxu_to_hcd(oxu)->state = HC_STATE_HALT; | |
393 | return; | |
394 | } | |
395 | ||
396 | /* then disable anything that's still active */ | |
397 | temp = readl(&oxu->regs->command); | |
398 | temp &= ~(CMD_ASE | CMD_IAAD | CMD_PSE); | |
399 | writel(temp, &oxu->regs->command); | |
400 | ||
401 | /* hardware can take 16 microframes to turn off ... */ | |
402 | if (handshake(oxu, &oxu->regs->status, STS_ASS | STS_PSS, | |
403 | 0, 16 * 125) != 0) { | |
404 | oxu_to_hcd(oxu)->state = HC_STATE_HALT; | |
405 | return; | |
406 | } | |
407 | } | |
408 | ||
409 | static int check_reset_complete(struct oxu_hcd *oxu, int index, | |
410 | u32 __iomem *status_reg, int port_status) | |
411 | { | |
412 | if (!(port_status & PORT_CONNECT)) { | |
413 | oxu->reset_done[index] = 0; | |
414 | return port_status; | |
415 | } | |
416 | ||
417 | /* if reset finished and it's still not enabled -- handoff */ | |
418 | if (!(port_status & PORT_PE)) { | |
419 | oxu_dbg(oxu, "Failed to enable port %d on root hub TT\n", | |
420 | index+1); | |
421 | return port_status; | |
422 | } else | |
423 | oxu_dbg(oxu, "port %d high speed\n", index + 1); | |
424 | ||
425 | return port_status; | |
426 | } | |
427 | ||
428 | static void ehci_hub_descriptor(struct oxu_hcd *oxu, | |
429 | struct usb_hub_descriptor *desc) | |
430 | { | |
431 | int ports = HCS_N_PORTS(oxu->hcs_params); | |
432 | u16 temp; | |
433 | ||
2c42c087 | 434 | desc->bDescriptorType = USB_DT_HUB; |
b92a78e5 RG | 435 | desc->bPwrOn2PwrGood = 10; /* oxu 1.0, 2.3.9 says 20ms max */ |
436 | desc->bHubContrCurrent = 0; | |
437 | ||
438 | desc->bNbrPorts = ports; | |
439 | temp = 1 + (ports / 8); | |
440 | desc->bDescLength = 7 + 2 * temp; | |
441 | ||
da13051c | 442 | /* ports removable, and usb 1.0 legacy PortPwrCtrlMask */ |
dbe79bbe JY | 443 | memset(&desc->u.hs.DeviceRemovable[0], 0, temp); |
444 | memset(&desc->u.hs.DeviceRemovable[temp], 0xff, temp); | |
b92a78e5 | 445 | |
85943f34 | 446 | temp = HUB_CHAR_INDV_PORT_OCPM; /* per-port overcurrent reporting */ |
b92a78e5 | 447 | if (HCS_PPC(oxu->hcs_params)) |
85943f34 | 448 | temp |= HUB_CHAR_INDV_PORT_LPSM; /* per-port power control */ |
b92a78e5 | 449 | else |
85943f34 | 450 | temp |= HUB_CHAR_NO_LPSM; /* no power switching */ |
b92a78e5 RG | 451 | desc->wHubCharacteristics = (__force __u16)cpu_to_le16(temp); |
452 | } | |
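
A quick worked example of the descriptor sizing above, assuming a hypothetical 3-port root hub (the real port count comes from HCS_N_PORTS):

```c
/*
 * Hypothetical example: HCS_N_PORTS(oxu->hcs_params) == 3
 *   temp        = 1 + (3 / 8) = 1    byte per port bitmap
 *   bDescLength = 7 + 2 * 1   = 9    bytes total
 * DeviceRemovable gets one 0x00 byte, PortPwrCtrlMask one 0xff byte.
 */
```
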
453 | ||
454 | ||
455 | /* Allocate an OXU210HP on-chip memory data buffer | |
456 | * | |
457 | * An on-chip memory data buffer is required for each OXU210HP USB transfer. | |
458 | * Each transfer descriptor has one or more on-chip memory data buffers. | |
459 | * | |
460 | * Data buffers are allocated from a fixed-size pool of data blocks. |
461 | * To minimise fragmentation and give reasonable memory utilisation, |
462 | * data buffers are allocated in sizes that are power-of-2 multiples of |
463 | * the block size, starting at an address that is a multiple of the allocated size. |
464 | * | |
465 | * FIXME: callers of this function require a buffer to be allocated for | |
466 | * len=0. This is a waste of on-chip memory and should be fixed. Then this |
467 | * function should be changed to not allocate a buffer for len=0. | |
468 | */ | |
469 | static int oxu_buf_alloc(struct oxu_hcd *oxu, struct ehci_qtd *qtd, int len) | |
470 | { | |
471 | int n_blocks; /* minimum blocks needed to hold len */ |
472 | int a_blocks; /* blocks allocated */ | |
473 | int i, j; | |
474 | ||
475 | /* Don't allocate bigger than supported */ |
476 | if (len > BUFFER_SIZE * BUFFER_NUM) { | |
477 | oxu_err(oxu, "buffer too big (%d)\n", len); | |
478 | return -ENOMEM; | |
479 | } | |
480 | ||
481 | spin_lock(&oxu->mem_lock); | |
482 | ||
483 | /* Number of blocks needed to hold len */ | |
484 | n_blocks = (len + BUFFER_SIZE - 1) / BUFFER_SIZE; | |
485 | ||
486 | /* Round the number of blocks up to the next power of 2 */ |
487 | for (a_blocks = 1; a_blocks < n_blocks; a_blocks <<= 1) | |
488 | ; | |
489 | ||
490 | /* Find a suitable available data buffer */ | |
491 | for (i = 0; i < BUFFER_NUM; | |
492 | i += max(a_blocks, (int)oxu->db_used[i])) { | |
493 | ||
494 | /* Check all the required blocks are available */ | |
495 | for (j = 0; j < a_blocks; j++) | |
496 | if (oxu->db_used[i + j]) | |
497 | break; | |
498 | ||
499 | if (j != a_blocks) | |
500 | continue; | |
501 | ||
502 | /* Allocate blocks found! */ | |
503 | qtd->buffer = (void *) &oxu->mem->db_pool[i]; | |
504 | qtd->buffer_dma = virt_to_phys(qtd->buffer); | |
505 | ||
506 | qtd->qtd_buffer_len = BUFFER_SIZE * a_blocks; | |
507 | oxu->db_used[i] = a_blocks; | |
508 | ||
509 | spin_unlock(&oxu->mem_lock); | |
510 | ||
511 | return 0; | |
512 | } | |
513 | ||
514 | /* Failed */ | |
515 | ||
516 | spin_unlock(&oxu->mem_lock); | |
517 | ||
518 | return -ENOMEM; | |
519 | } | |
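
A rough illustration of the rounding scheme described in the comment above oxu_buf_alloc(); the BUFFER_SIZE value of 1024 is only an assumed example (the real constant lives in oxu210hp.h):

```c
/*
 * Assumed example: BUFFER_SIZE == 1024, len == 3000
 *   n_blocks = (3000 + 1023) / 1024 = 3    blocks strictly needed
 *   a_blocks = 4                           rounded up to a power of 2
 * so 4096 bytes are reserved, starting at the first index i whose
 * four consecutive blocks i .. i+3 are all unused.
 */
```
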
520 | ||
521 | static void oxu_buf_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd) | |
522 | { | |
523 | int index; | |
524 | ||
525 | spin_lock(&oxu->mem_lock); | |
526 | ||
527 | index = (qtd->buffer - (void *) &oxu->mem->db_pool[0]) | |
528 | / BUFFER_SIZE; | |
529 | oxu->db_used[index] = 0; | |
530 | qtd->qtd_buffer_len = 0; | |
531 | qtd->buffer_dma = 0; | |
532 | qtd->buffer = NULL; | |
533 | ||
534 | spin_unlock(&oxu->mem_lock); | |
b92a78e5 RG | 535 | } |
536 | ||
537 | static inline void ehci_qtd_init(struct ehci_qtd *qtd, dma_addr_t dma) | |
538 | { | |
539 | memset(qtd, 0, sizeof *qtd); | |
540 | qtd->qtd_dma = dma; | |
541 | qtd->hw_token = cpu_to_le32(QTD_STS_HALT); | |
542 | qtd->hw_next = EHCI_LIST_END; | |
543 | qtd->hw_alt_next = EHCI_LIST_END; | |
544 | INIT_LIST_HEAD(&qtd->qtd_list); | |
545 | } | |
546 | ||
547 | static inline void oxu_qtd_free(struct oxu_hcd *oxu, struct ehci_qtd *qtd) | |
548 | { | |
549 | int index; | |
550 | ||
551 | if (qtd->buffer) | |
552 | oxu_buf_free(oxu, qtd); | |
553 | ||
554 | spin_lock(&oxu->mem_lock); | |
555 | ||
556 | index = qtd - &oxu->mem->qtd_pool[0]; | |
557 | oxu->qtd_used[index] = 0; | |
558 | ||
559 | spin_unlock(&oxu->mem_lock); | |
b92a78e5 RG | 560 | } |
561 | ||
562 | static struct ehci_qtd *ehci_qtd_alloc(struct oxu_hcd *oxu) | |
563 | { | |
564 | int i; | |
565 | struct ehci_qtd *qtd = NULL; | |
566 | ||
567 | spin_lock(&oxu->mem_lock); | |
568 | ||
569 | for (i = 0; i < QTD_NUM; i++) | |
570 | if (!oxu->qtd_used[i]) | |
571 | break; | |
572 | ||
573 | if (i < QTD_NUM) { | |
574 | qtd = (struct ehci_qtd *) &oxu->mem->qtd_pool[i]; | |
575 | memset(qtd, 0, sizeof *qtd); | |
576 | ||
577 | qtd->hw_token = cpu_to_le32(QTD_STS_HALT); | |
578 | qtd->hw_next = EHCI_LIST_END; | |
579 | qtd->hw_alt_next = EHCI_LIST_END; | |
580 | INIT_LIST_HEAD(&qtd->qtd_list); | |
581 | ||
582 | qtd->qtd_dma = virt_to_phys(qtd); | |
583 | ||
584 | oxu->qtd_used[i] = 1; | |
585 | } | |
586 | ||
587 | spin_unlock(&oxu->mem_lock); | |
588 | ||
589 | return qtd; | |
590 | } | |
591 | ||
592 | static void oxu_qh_free(struct oxu_hcd *oxu, struct ehci_qh *qh) | |
593 | { | |
594 | int index; | |
595 | ||
596 | spin_lock(&oxu->mem_lock); | |
597 | ||
598 | index = qh - &oxu->mem->qh_pool[0]; | |
599 | oxu->qh_used[index] = 0; | |
600 | ||
601 | spin_unlock(&oxu->mem_lock); | |
b92a78e5 RG | 602 | } |
603 | ||
604 | static void qh_destroy(struct kref *kref) | |
605 | { | |
606 | struct ehci_qh *qh = container_of(kref, struct ehci_qh, kref); | |
607 | struct oxu_hcd *oxu = qh->oxu; | |
608 | ||
609 | /* clean qtds first, and know this is not linked */ | |
610 | if (!list_empty(&qh->qtd_list) || qh->qh_next.ptr) { | |
611 | oxu_dbg(oxu, "unused qh not empty!\n"); | |
612 | BUG(); | |
613 | } | |
614 | if (qh->dummy) | |
615 | oxu_qtd_free(oxu, qh->dummy); | |
616 | oxu_qh_free(oxu, qh); | |
617 | } | |
618 | ||
619 | static struct ehci_qh *oxu_qh_alloc(struct oxu_hcd *oxu) | |
620 | { | |
621 | int i; | |
622 | struct ehci_qh *qh = NULL; | |
623 | ||
624 | spin_lock(&oxu->mem_lock); | |
625 | ||
626 | for (i = 0; i < QHEAD_NUM; i++) | |
627 | if (!oxu->qh_used[i]) | |
628 | break; | |
629 | ||
630 | if (i < QHEAD_NUM) { | |
631 | qh = (struct ehci_qh *) &oxu->mem->qh_pool[i]; | |
632 | memset(qh, 0, sizeof *qh); | |
633 | ||
634 | kref_init(&qh->kref); | |
635 | qh->oxu = oxu; | |
636 | qh->qh_dma = virt_to_phys(qh); | |
637 | INIT_LIST_HEAD(&qh->qtd_list); | |
638 | ||
639 | /* dummy td enables safe urb queuing */ | |
640 | qh->dummy = ehci_qtd_alloc(oxu); | |
641 | if (qh->dummy == NULL) { | |
642 | oxu_dbg(oxu, "no dummy td\n"); | |
643 | oxu->qh_used[i] = 0; | |
82a5eeb9 DC | 644 | qh = NULL; |
645 | goto unlock; | |
b92a78e5 RG | 646 | } |
647 | ||
648 | oxu->qh_used[i] = 1; | |
649 | } | |
82a5eeb9 | 650 | unlock: |
b92a78e5 RG | 651 | spin_unlock(&oxu->mem_lock); |
652 | ||
653 | return qh; | |
654 | } | |
655 | ||
656 | /* to share a qh (cpu threads, or hc) */ | |
657 | static inline struct ehci_qh *qh_get(struct ehci_qh *qh) | |
658 | { | |
659 | kref_get(&qh->kref); | |
660 | return qh; | |
661 | } | |
662 | ||
663 | static inline void qh_put(struct ehci_qh *qh) | |
664 | { | |
665 | kref_put(&qh->kref, qh_destroy); | |
666 | } | |
667 | ||
668 | static void oxu_murb_free(struct oxu_hcd *oxu, struct oxu_murb *murb) | |
669 | { | |
670 | int index; | |
671 | ||
672 | spin_lock(&oxu->mem_lock); | |
673 | ||
674 | index = murb - &oxu->murb_pool[0]; | |
675 | oxu->murb_used[index] = 0; | |
676 | ||
677 | spin_unlock(&oxu->mem_lock); | |
b92a78e5 RG | 678 | } |
679 | ||
680 | static struct oxu_murb *oxu_murb_alloc(struct oxu_hcd *oxu) | |
681 | ||
682 | { | |
683 | int i; | |
684 | struct oxu_murb *murb = NULL; | |
685 | ||
686 | spin_lock(&oxu->mem_lock); | |
687 | ||
688 | for (i = 0; i < MURB_NUM; i++) | |
689 | if (!oxu->murb_used[i]) | |
690 | break; | |
691 | ||
692 | if (i < MURB_NUM) { | |
693 | murb = &(oxu->murb_pool)[i]; | |
694 | ||
695 | oxu->murb_used[i] = 1; | |
696 | } | |
697 | ||
698 | spin_unlock(&oxu->mem_lock); | |
699 | ||
700 | return murb; | |
701 | } | |
702 | ||
703 | /* The queue heads and transfer descriptors are managed from pools tied | |
704 | * to each of the "per device" structures. | |
705 | * This is the initialisation and cleanup code. | |
706 | */ | |
707 | static void ehci_mem_cleanup(struct oxu_hcd *oxu) | |
708 | { | |
709 | kfree(oxu->murb_pool); | |
710 | oxu->murb_pool = NULL; | |
711 | ||
712 | if (oxu->async) | |
713 | qh_put(oxu->async); | |
714 | oxu->async = NULL; | |
715 | ||
716 | del_timer(&oxu->urb_timer); | |
717 | ||
718 | oxu->periodic = NULL; | |
719 | ||
720 | /* shadow periodic table */ | |
721 | kfree(oxu->pshadow); | |
722 | oxu->pshadow = NULL; | |
723 | } | |
724 | ||
725 | /* Remember to add cleanup code (above) if you add anything here. | |
726 | */ | |
727 | static int ehci_mem_init(struct oxu_hcd *oxu, gfp_t flags) | |
728 | { | |
729 | int i; | |
730 | ||
731 | for (i = 0; i < oxu->periodic_size; i++) | |
732 | oxu->mem->frame_list[i] = EHCI_LIST_END; | |
733 | for (i = 0; i < QHEAD_NUM; i++) | |
734 | oxu->qh_used[i] = 0; | |
735 | for (i = 0; i < QTD_NUM; i++) | |
736 | oxu->qtd_used[i] = 0; | |
737 | ||
738 | oxu->murb_pool = kcalloc(MURB_NUM, sizeof(struct oxu_murb), flags); | |
739 | if (!oxu->murb_pool) | |
740 | goto fail; | |
741 | ||
742 | for (i = 0; i < MURB_NUM; i++) | |
743 | oxu->murb_used[i] = 0; | |
744 | ||
745 | oxu->async = oxu_qh_alloc(oxu); | |
746 | if (!oxu->async) | |
747 | goto fail; | |
748 | ||
749 | oxu->periodic = (__le32 *) &oxu->mem->frame_list; | |
750 | oxu->periodic_dma = virt_to_phys(oxu->periodic); | |
751 | ||
752 | for (i = 0; i < oxu->periodic_size; i++) | |
753 | oxu->periodic[i] = EHCI_LIST_END; | |
754 | ||
755 | /* software shadow of hardware table */ | |
756 | oxu->pshadow = kcalloc(oxu->periodic_size, sizeof(void *), flags); | |
757 | if (oxu->pshadow != NULL) | |
758 | return 0; | |
759 | ||
760 | fail: | |
761 | oxu_dbg(oxu, "couldn't init memory\n"); | |
762 | ehci_mem_cleanup(oxu); | |
763 | return -ENOMEM; | |
764 | } | |
765 | ||
766 | /* Fill a qtd, returning how much of the buffer we were able to queue up. | |
767 | */ | |
768 | static int qtd_fill(struct ehci_qtd *qtd, dma_addr_t buf, size_t len, | |
769 | int token, int maxpacket) | |
770 | { | |
771 | int i, count; | |
772 | u64 addr = buf; | |
773 | ||
774 | /* one buffer entry per 4K ... first might be short or unaligned */ | |
775 | qtd->hw_buf[0] = cpu_to_le32((u32)addr); | |
776 | qtd->hw_buf_hi[0] = cpu_to_le32((u32)(addr >> 32)); | |
777 | count = 0x1000 - (buf & 0x0fff); /* rest of that page */ | |
778 | if (likely(len < count)) /* ... iff needed */ | |
779 | count = len; | |
780 | else { | |
781 | buf += 0x1000; | |
782 | buf &= ~0x0fff; | |
783 | ||
784 | /* per-qtd limit: from 16K to 20K (best alignment) */ | |
785 | for (i = 1; count < len && i < 5; i++) { | |
786 | addr = buf; | |
787 | qtd->hw_buf[i] = cpu_to_le32((u32)addr); | |
788 | qtd->hw_buf_hi[i] = cpu_to_le32((u32)(addr >> 32)); | |
789 | buf += 0x1000; | |
790 | if ((count + 0x1000) < len) | |
791 | count += 0x1000; | |
792 | else | |
793 | count = len; | |
794 | } | |
795 | ||
796 | /* short packets may only terminate transfers */ | |
797 | if (count != len) | |
798 | count -= (count % maxpacket); | |
799 | } | |
800 | qtd->hw_token = cpu_to_le32((count << 16) | token); | |
801 | qtd->length = count; | |
802 | ||
803 | return count; | |
804 | } | |
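
A worked example of the page mapping qtd_fill() performs; all numbers are hypothetical:

```c
/*
 * Hypothetical call: buf ends in 0x800 (mid-page), len = 20000, maxpacket = 512
 *   hw_buf[0] covers 0x1000 - 0x800 = 2048 bytes (rest of the first page)
 *   hw_buf[1..4] cover four full pages = 4 * 4096 = 16384 bytes
 *   count = 2048 + 16384 = 18432 (< len), and 18432 % 512 == 0,
 * so this qtd queues 18432 bytes; the remaining 1568 go into the next qtd.
 */
```
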
805 | ||
806 | static inline void qh_update(struct oxu_hcd *oxu, | |
807 | struct ehci_qh *qh, struct ehci_qtd *qtd) | |
808 | { | |
809 | /* writes to an active overlay are unsafe */ | |
810 | BUG_ON(qh->qh_state != QH_STATE_IDLE); | |
811 | ||
812 | qh->hw_qtd_next = QTD_NEXT(qtd->qtd_dma); | |
813 | qh->hw_alt_next = EHCI_LIST_END; | |
814 | ||
815 | /* Except for control endpoints, we make hardware maintain data | |
816 | * toggle (like OHCI) ... here (re)initialize the toggle in the QH, | |
817 | * and set the pseudo-toggle in udev. Only usb_clear_halt() will | |
818 | * ever clear it. | |
819 | */ | |
820 | if (!(qh->hw_info1 & cpu_to_le32(1 << 14))) { | |
821 | unsigned is_out, epnum; | |
822 | ||
823 | is_out = !(qtd->hw_token & cpu_to_le32(1 << 8)); | |
824 | epnum = (le32_to_cpup(&qh->hw_info1) >> 8) & 0x0f; | |
825 | if (unlikely(!usb_gettoggle(qh->dev, epnum, is_out))) { | |
551509d2 | 826 | qh->hw_token &= ~cpu_to_le32(QTD_TOGGLE); |
b92a78e5 RG | 827 | usb_settoggle(qh->dev, epnum, is_out, 1); |
828 | } | |
829 | } | |
830 | ||
831 | /* HC must see latest qtd and qh data before we clear ACTIVE+HALT */ | |
832 | wmb(); | |
551509d2 | 833 | qh->hw_token &= cpu_to_le32(QTD_TOGGLE | QTD_STS_PING); |
b92a78e5 RG | 834 | } |
835 | ||
836 | /* If it weren't for a common silicon quirk (writing the dummy into the qh | |
837 | * overlay, so qh->hw_token wrongly becomes inactive/halted), only fault | |
838 | * recovery (including urb dequeue) would need software changes to a QH... | |
839 | */ | |
840 | static void qh_refresh(struct oxu_hcd *oxu, struct ehci_qh *qh) | |
841 | { | |
842 | struct ehci_qtd *qtd; | |
843 | ||
844 | if (list_empty(&qh->qtd_list)) | |
845 | qtd = qh->dummy; | |
846 | else { | |
847 | qtd = list_entry(qh->qtd_list.next, | |
848 | struct ehci_qtd, qtd_list); | |
849 | /* first qtd may already be partially processed */ | |
850 | if (cpu_to_le32(qtd->qtd_dma) == qh->hw_current) | |
851 | qtd = NULL; | |
852 | } | |
853 | ||
854 | if (qtd) | |
855 | qh_update(oxu, qh, qtd); | |
856 | } | |
857 | ||
858 | static void qtd_copy_status(struct oxu_hcd *oxu, struct urb *urb, | |
859 | size_t length, u32 token) | |
860 | { | |
861 | /* count IN/OUT bytes, not SETUP (even short packets) */ | |
862 | if (likely(QTD_PID(token) != 2)) | |
863 | urb->actual_length += length - QTD_LENGTH(token); | |
864 | ||
865 | /* don't modify error codes */ | |
866 | if (unlikely(urb->status != -EINPROGRESS)) | |
867 | return; | |
868 | ||
869 | /* force cleanup after short read; not always an error */ | |
870 | if (unlikely(IS_SHORT_READ(token))) | |
871 | urb->status = -EREMOTEIO; | |
872 | ||
873 | /* serious "can't proceed" faults reported by the hardware */ | |
874 | if (token & QTD_STS_HALT) { | |
875 | if (token & QTD_STS_BABBLE) { | |
876 | /* FIXME "must" disable babbling device's port too */ | |
877 | urb->status = -EOVERFLOW; | |
878 | } else if (token & QTD_STS_MMF) { | |
879 | /* fs/ls interrupt xfer missed the complete-split */ | |
880 | urb->status = -EPROTO; | |
881 | } else if (token & QTD_STS_DBE) { | |
882 | urb->status = (QTD_PID(token) == 1) /* IN ? */ | |
883 | ? -ENOSR /* hc couldn't read data */ | |
884 | : -ECOMM; /* hc couldn't write data */ | |
885 | } else if (token & QTD_STS_XACT) { | |
886 | /* timeout, bad crc, wrong PID, etc; retried */ | |
887 | if (QTD_CERR(token)) | |
888 | urb->status = -EPIPE; | |
889 | else { | |
890 | oxu_dbg(oxu, "devpath %s ep%d%s 3strikes\n", | |
891 | urb->dev->devpath, | |
892 | usb_pipeendpoint(urb->pipe), | |
893 | usb_pipein(urb->pipe) ? "in" : "out"); | |
894 | urb->status = -EPROTO; | |
895 | } | |
896 | /* CERR nonzero + no errors + halt --> stall */ | |
897 | } else if (QTD_CERR(token)) | |
898 | urb->status = -EPIPE; | |
899 | else /* unknown */ | |
900 | urb->status = -EPROTO; | |
901 | ||
902 | oxu_vdbg(oxu, "dev%d ep%d%s qtd token %08x --> status %d\n", | |
903 | usb_pipedevice(urb->pipe), | |
904 | usb_pipeendpoint(urb->pipe), | |
905 | usb_pipein(urb->pipe) ? "in" : "out", | |
906 | token, urb->status); | |
907 | } | |
908 | } | |
909 | ||
910 | static void ehci_urb_done(struct oxu_hcd *oxu, struct urb *urb) | |
911 | __releases(oxu->lock) | |
912 | __acquires(oxu->lock) | |
913 | { | |
914 | if (likely(urb->hcpriv != NULL)) { | |
915 | struct ehci_qh *qh = (struct ehci_qh *) urb->hcpriv; | |
916 | ||
917 | /* S-mask in a QH means it's an interrupt urb */ | |
551509d2 | 918 | if ((qh->hw_info2 & cpu_to_le32(QH_SMASK)) != 0) { |
b92a78e5 RG | 919 ||
920 | /* ... update hc-wide periodic stats (for usbfs) */ | |
921 | oxu_to_hcd(oxu)->self.bandwidth_int_reqs--; | |
922 | } | |
923 | qh_put(qh); | |
924 | } | |
925 | ||
926 | urb->hcpriv = NULL; | |
927 | switch (urb->status) { | |
928 | case -EINPROGRESS: /* success */ | |
929 | urb->status = 0; | |
930 | default: /* fault */ | |
931 | break; | |
932 | case -EREMOTEIO: /* fault or normal */ | |
933 | if (!(urb->transfer_flags & URB_SHORT_NOT_OK)) | |
934 | urb->status = 0; | |
935 | break; | |
936 | case -ECONNRESET: /* canceled */ | |
937 | case -ENOENT: | |
938 | break; | |
939 | } | |
940 | ||
941 | #ifdef OXU_URB_TRACE | |
942 | oxu_dbg(oxu, "%s %s urb %p ep%d%s status %d len %d/%d\n", | |
943 | __func__, urb->dev->devpath, urb, | |
944 | usb_pipeendpoint(urb->pipe), | |
945 | usb_pipein(urb->pipe) ? "in" : "out", | |
946 | urb->status, | |
947 | urb->actual_length, urb->transfer_buffer_length); | |
948 | #endif | |
949 | ||
950 | /* complete() can reenter this HCD */ | |
951 | spin_unlock(&oxu->lock); | |
952 | usb_hcd_giveback_urb(oxu_to_hcd(oxu), urb, urb->status); | |
953 | spin_lock(&oxu->lock); | |
954 | } | |
955 | ||
956 | static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh); | |
957 | static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh); | |
958 | ||
959 | static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh); | |
960 | static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh); | |
961 | ||
551509d2 | 962 | #define HALT_BIT cpu_to_le32(QTD_STS_HALT) |
b92a78e5 RG | 963 ||
964 | /* Process and free completed qtds for a qh, returning URBs to drivers. | |
965 | * Chases up to qh->hw_current. Returns number of completions called, | |
966 | * indicating how much "real" work we did. | |
967 | */ | |
968 | static unsigned qh_completions(struct oxu_hcd *oxu, struct ehci_qh *qh) | |
969 | { | |
970 | struct ehci_qtd *last = NULL, *end = qh->dummy; | |
7b1585f2 | 971 | struct ehci_qtd *qtd, *tmp; |
b92a78e5 RG | 972 | int stopped; |
973 | unsigned count = 0; | |
974 | int do_status = 0; | |
975 | u8 state; | |
976 | struct oxu_murb *murb = NULL; | |
977 | ||
978 | if (unlikely(list_empty(&qh->qtd_list))) | |
979 | return count; | |
980 | ||
981 | /* completions (or tasks on other cpus) must never clobber HALT | |
982 | * till we've gone through and cleaned everything up, even when | |
983 | * they add urbs to this qh's queue or mark them for unlinking. | |
984 | * | |
985 | * NOTE: unlinking expects to be done in queue order. | |
986 | */ | |
987 | state = qh->qh_state; | |
988 | qh->qh_state = QH_STATE_COMPLETING; | |
989 | stopped = (state == QH_STATE_IDLE); | |
990 | ||
991 | /* remove de-activated QTDs from front of queue. | |
992 | * after faults (including short reads), cleanup this urb | |
993 | * then let the queue advance. | |
994 | * if queue is stopped, handles unlinks. | |
995 | */ | |
7b1585f2 | 996 | list_for_each_entry_safe(qtd, tmp, &qh->qtd_list, qtd_list) { |
b92a78e5 RG | 997 | struct urb *urb; |
998 | u32 token = 0; | |
999 | ||
b92a78e5 RG | 1000 | urb = qtd->urb; |
1001 | ||
1002 | /* Clean up any state from previous QTD ...*/ | |
1003 | if (last) { | |
1004 | if (likely(last->urb != urb)) { | |
1005 | if (last->urb->complete == NULL) { | |
1006 | murb = (struct oxu_murb *) last->urb; | |
1007 | last->urb = murb->main; | |
1008 | if (murb->last) { | |
1009 | ehci_urb_done(oxu, last->urb); | |
1010 | count++; | |
1011 | } | |
1012 | oxu_murb_free(oxu, murb); | |
1013 | } else { | |
1014 | ehci_urb_done(oxu, last->urb); | |
1015 | count++; | |
1016 | } | |
1017 | } | |
1018 | oxu_qtd_free(oxu, last); | |
1019 | last = NULL; | |
1020 | } | |
1021 | ||
1022 | /* ignore urbs submitted during completions we reported */ | |
1023 | if (qtd == end) | |
1024 | break; | |
1025 | ||
1026 | /* hardware copies qtd out of qh overlay */ | |
1027 | rmb(); | |
1028 | token = le32_to_cpu(qtd->hw_token); | |
1029 | ||
1030 | /* always clean up qtds the hc de-activated */ | |
1031 | if ((token & QTD_STS_ACTIVE) == 0) { | |
1032 | ||
1033 | if ((token & QTD_STS_HALT) != 0) { | |
1034 | stopped = 1; | |
1035 | ||
1036 | /* magic dummy for some short reads; qh won't advance. | |
1037 | * that silicon quirk can kick in with this dummy too. | |
1038 | */ | |
1039 | } else if (IS_SHORT_READ(token) && | |
1040 | !(qtd->hw_alt_next & EHCI_LIST_END)) { | |
1041 | stopped = 1; | |
1042 | goto halt; | |
1043 | } | |
1044 | ||
1045 | /* stop scanning when we reach qtds the hc is using */ | |
1046 | } else if (likely(!stopped && | |
1047 | HC_IS_RUNNING(oxu_to_hcd(oxu)->state))) { | |
1048 | break; | |
1049 | ||
1050 | } else { | |
1051 | stopped = 1; | |
1052 | ||
1053 | if (unlikely(!HC_IS_RUNNING(oxu_to_hcd(oxu)->state))) | |
1054 | urb->status = -ESHUTDOWN; | |
1055 | ||
1056 | /* ignore active urbs unless some previous qtd | |
1057 | * for the urb faulted (including short read) or | |
1058 | * its urb was canceled. we may patch qh or qtds. | |
1059 | */ | |
1060 | if (likely(urb->status == -EINPROGRESS)) | |
1061 | continue; | |
1062 | ||
1063 | /* issue status after short control reads */ | |
1064 | if (unlikely(do_status != 0) | |
1065 | && QTD_PID(token) == 0 /* OUT */) { | |
1066 | do_status = 0; | |
1067 | continue; | |
1068 | } | |
1069 | ||
1070 | /* token in overlay may be most current */ | |
1071 | if (state == QH_STATE_IDLE | |
1072 | && cpu_to_le32(qtd->qtd_dma) | |
1073 | == qh->hw_current) | |
1074 | token = le32_to_cpu(qh->hw_token); | |
1075 | ||
1076 | /* force halt for unlinked or blocked qh, so we'll | |
1077 | * patch the qh later and so that completions can't | |
1078 | * activate it while we "know" it's stopped. | |
1079 | */ | |
1080 | if ((HALT_BIT & qh->hw_token) == 0) { | |
1081 | halt: | |
1082 | qh->hw_token |= HALT_BIT; | |
1083 | wmb(); | |
1084 | } | |
1085 | } | |
1086 | ||
1087 | /* Remove it from the queue */ | |
1088 | qtd_copy_status(oxu, urb->complete ? | |
1089 | urb : ((struct oxu_murb *) urb)->main, | |
1090 | qtd->length, token); | |
1091 | if ((usb_pipein(qtd->urb->pipe)) && | |
1092 | (NULL != qtd->transfer_buffer)) | |
1093 | memcpy(qtd->transfer_buffer, qtd->buffer, qtd->length); | |
1094 | do_status = (urb->status == -EREMOTEIO) | |
1095 | && usb_pipecontrol(urb->pipe); | |
1096 | ||
1097 | if (stopped && qtd->qtd_list.prev != &qh->qtd_list) { | |
1098 | last = list_entry(qtd->qtd_list.prev, | |
1099 | struct ehci_qtd, qtd_list); | |
1100 | last->hw_next = qtd->hw_next; | |
1101 | } | |
1102 | list_del(&qtd->qtd_list); | |
1103 | last = qtd; | |
1104 | } | |
1105 | ||
1106 | /* last urb's completion might still need calling */ | |
1107 | if (likely(last != NULL)) { | |
1108 | if (last->urb->complete == NULL) { | |
1109 | murb = (struct oxu_murb *) last->urb; | |
1110 | last->urb = murb->main; | |
1111 | if (murb->last) { | |
1112 | ehci_urb_done(oxu, last->urb); | |
1113 | count++; | |
1114 | } | |
1115 | oxu_murb_free(oxu, murb); | |
1116 | } else { | |
1117 | ehci_urb_done(oxu, last->urb); | |
1118 | count++; | |
1119 | } | |
1120 | oxu_qtd_free(oxu, last); | |
1121 | } | |
1122 | ||
1123 | /* restore original state; caller must unlink or relink */ | |
1124 | qh->qh_state = state; | |
1125 | ||
1126 | /* be sure the hardware's done with the qh before refreshing | |
1127 | * it after fault cleanup, or recovering from silicon wrongly | |
1128 | * overlaying the dummy qtd (which reduces DMA chatter). | |
1129 | */ | |
1130 | if (stopped != 0 || qh->hw_qtd_next == EHCI_LIST_END) { | |
1131 | switch (state) { | |
1132 | case QH_STATE_IDLE: | |
1133 | qh_refresh(oxu, qh); | |
1134 | break; | |
1135 | case QH_STATE_LINKED: | |
1136 | /* should be rare for periodic transfers, | |
1137 | * except maybe high bandwidth ... | |
1138 | */ | |
551509d2 | 1139 | if ((cpu_to_le32(QH_SMASK) |
b92a78e5 RG | 1140 | & qh->hw_info2) != 0) { |
1141 | intr_deschedule(oxu, qh); | |
1142 | (void) qh_schedule(oxu, qh); | |
1143 | } else | |
1144 | unlink_async(oxu, qh); | |
1145 | break; | |
1146 | /* otherwise, unlink already started */ | |
1147 | } | |
1148 | } | |
1149 | ||
1150 | return count; | |
1151 | } | |
1152 | ||
1153 | /* High bandwidth multiplier, as encoded in highspeed endpoint descriptors */ | |
1154 | #define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03)) | |
1155 | /* ... and packet size, for any kind of endpoint descriptor */ | |
1156 | #define max_packet(wMaxPacketSize) ((wMaxPacketSize) & 0x07ff) | |
1157 | ||
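
A brief, hypothetical example of the wMaxPacketSize encoding these two macros unpack:

```c
/*
 * Hypothetical wMaxPacketSize = 0x1400 (high-bandwidth high-speed endpoint):
 *   max_packet(0x1400) = 0x1400 & 0x07ff          = 1024 bytes per transaction
 *   hb_mult(0x1400)    = 1 + ((0x1400 >> 11) & 3) = 3 transactions per uframe
 * i.e. up to 3 * 1024 bytes can move in a single microframe.
 */
```
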
1158 | /* Reverse of qh_urb_transaction: free a list of TDs. | |
1159 | * used for cleanup after errors, before HC sees an URB's TDs. | |
1160 | */ | |
1161 | static void qtd_list_free(struct oxu_hcd *oxu, | |
7b1585f2 | 1162 | struct urb *urb, struct list_head *head) |
b92a78e5 | 1163 | { |
7b1585f2 | 1164 | struct ehci_qtd *qtd, *temp; |
b92a78e5 | 1165 | |
7b1585f2 | 1166 | list_for_each_entry_safe(qtd, temp, head, qtd_list) { |
b92a78e5 RG | 1167 | list_del(&qtd->qtd_list); |
1168 | oxu_qtd_free(oxu, qtd); | |
1169 | } | |
1170 | } | |
1171 | ||
1172 | /* Create a list of filled qtds for this URB; won't link into qh. | |
1173 | */ | |
1174 | static struct list_head *qh_urb_transaction(struct oxu_hcd *oxu, | |
1175 | struct urb *urb, | |
1176 | struct list_head *head, | |
1177 | gfp_t flags) | |
1178 | { | |
1179 | struct ehci_qtd *qtd, *qtd_prev; | |
1180 | dma_addr_t buf; | |
1181 | int len, maxpacket; | |
1182 | int is_input; | |
1183 | u32 token; | |
1184 | void *transfer_buf = NULL; | |
1185 | int ret; | |
1186 | ||
1187 | /* | |
1188 | * URBs map to sequences of QTDs: one logical transaction | |
1189 | */ | |
1190 | qtd = ehci_qtd_alloc(oxu); | |
1191 | if (unlikely(!qtd)) | |
1192 | return NULL; | |
1193 | list_add_tail(&qtd->qtd_list, head); | |
1194 | qtd->urb = urb; | |
1195 | ||
1196 | token = QTD_STS_ACTIVE; | |
1197 | token |= (EHCI_TUNE_CERR << 10); | |
1198 | /* for split transactions, SplitXState initialized to zero */ | |
1199 | ||
1200 | len = urb->transfer_buffer_length; | |
1201 | is_input = usb_pipein(urb->pipe); | |
1202 | if (!urb->transfer_buffer && urb->transfer_buffer_length && is_input) | |
1203 | urb->transfer_buffer = phys_to_virt(urb->transfer_dma); | |
1204 | ||
1205 | if (usb_pipecontrol(urb->pipe)) { | |
1206 | /* SETUP pid */ | |
1207 | ret = oxu_buf_alloc(oxu, qtd, sizeof(struct usb_ctrlrequest)); | |
1208 | if (ret) | |
1209 | goto cleanup; | |
1210 | ||
1211 | qtd_fill(qtd, qtd->buffer_dma, sizeof(struct usb_ctrlrequest), | |
1212 | token | (2 /* "setup" */ << 8), 8); | |
1213 | memcpy(qtd->buffer, qtd->urb->setup_packet, | |
1214 | sizeof(struct usb_ctrlrequest)); | |
1215 | ||
1216 | /* ... and always at least one more pid */ | |
1217 | token ^= QTD_TOGGLE; | |
1218 | qtd_prev = qtd; | |
1219 | qtd = ehci_qtd_alloc(oxu); | |
1220 | if (unlikely(!qtd)) | |
1221 | goto cleanup; | |
1222 | qtd->urb = urb; | |
1223 | qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma); | |
1224 | list_add_tail(&qtd->qtd_list, head); | |
1225 | ||
1226 | /* for zero length DATA stages, STATUS is always IN */ | |
1227 | if (len == 0) | |
1228 | token |= (1 /* "in" */ << 8); | |
1229 | } | |
1230 | ||
1231 | /* | |
1232 | * Data transfer stage: buffer setup | |
1233 | */ | |
1234 | ||
1235 | ret = oxu_buf_alloc(oxu, qtd, len); | |
1236 | if (ret) | |
1237 | goto cleanup; | |
1238 | ||
1239 | buf = qtd->buffer_dma; | |
1240 | transfer_buf = urb->transfer_buffer; | |
1241 | ||
1242 | if (!is_input) | |
1243 | memcpy(qtd->buffer, qtd->urb->transfer_buffer, len); | |
1244 | ||
1245 | if (is_input) | |
1246 | token |= (1 /* "in" */ << 8); | |
1247 | /* else it's already initted to "out" pid (0 << 8) */ | |
1248 | ||
1249 | maxpacket = max_packet(usb_maxpacket(urb->dev, urb->pipe, !is_input)); | |
1250 | ||
1251 | /* | |
1252 | * buffer gets wrapped in one or more qtds; | |
1253 | * last one may be "short" (including zero len) | |
1254 | * and may serve as a control status ack | |
1255 | */ | |
1256 | for (;;) { | |
1257 | int this_qtd_len; | |
1258 | ||
1259 | this_qtd_len = qtd_fill(qtd, buf, len, token, maxpacket); | |
1260 | qtd->transfer_buffer = transfer_buf; | |
1261 | len -= this_qtd_len; | |
1262 | buf += this_qtd_len; | |
1263 | transfer_buf += this_qtd_len; | |
1264 | if (is_input) | |
1265 | qtd->hw_alt_next = oxu->async->hw_alt_next; | |
1266 | ||
1267 | /* qh makes control packets use qtd toggle; maybe switch it */ | |
1268 | if ((maxpacket & (this_qtd_len + (maxpacket - 1))) == 0) | |
1269 | token ^= QTD_TOGGLE; | |
1270 | ||
1271 | if (likely(len <= 0)) | |
1272 | break; | |
1273 | ||
1274 | qtd_prev = qtd; | |
1275 | qtd = ehci_qtd_alloc(oxu); | |
1276 | if (unlikely(!qtd)) | |
1277 | goto cleanup; | |
1278 | if (likely(len > 0)) { | |
1279 | ret = oxu_buf_alloc(oxu, qtd, len); | |
1280 | if (ret) | |
1281 | goto cleanup; | |
1282 | } | |
1283 | qtd->urb = urb; | |
1284 | qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma); | |
1285 | list_add_tail(&qtd->qtd_list, head); | |
1286 | } | |
1287 | ||
1288 | /* unless the bulk/interrupt caller wants a chance to clean | |
1289 | * up after short reads, hc should advance qh past this urb | |
1290 | */ | |
1291 | if (likely((urb->transfer_flags & URB_SHORT_NOT_OK) == 0 | |
1292 | || usb_pipecontrol(urb->pipe))) | |
1293 | qtd->hw_alt_next = EHCI_LIST_END; | |
1294 | ||
1295 | /* | |
1296 | * control requests may need a terminating data "status" ack; | |
1297 | * bulk ones may need a terminating short packet (zero length). | |
1298 | */ | |
1299 | if (likely(urb->transfer_buffer_length != 0)) { | |
1300 | int one_more = 0; | |
1301 | ||
1302 | if (usb_pipecontrol(urb->pipe)) { | |
1303 | one_more = 1; | |
1304 | token ^= 0x0100; /* "in" <--> "out" */ | |
1305 | token |= QTD_TOGGLE; /* force DATA1 */ | |
1306 | } else if (usb_pipebulk(urb->pipe) | |
1307 | && (urb->transfer_flags & URB_ZERO_PACKET) | |
1308 | && !(urb->transfer_buffer_length % maxpacket)) { | |
1309 | one_more = 1; | |
1310 | } | |
1311 | if (one_more) { | |
1312 | qtd_prev = qtd; | |
1313 | qtd = ehci_qtd_alloc(oxu); | |
1314 | if (unlikely(!qtd)) | |
1315 | goto cleanup; | |
1316 | qtd->urb = urb; | |
1317 | qtd_prev->hw_next = QTD_NEXT(qtd->qtd_dma); | |
1318 | list_add_tail(&qtd->qtd_list, head); | |
1319 | ||
1320 | /* never any data in such packets */ | |
1321 | qtd_fill(qtd, 0, 0, token, 0); | |
1322 | } | |
1323 | } | |
1324 | ||
1325 | /* by default, enable interrupt on urb completion */ | |
a8ded8eb | 1326 | qtd->hw_token |= cpu_to_le32(QTD_IOC); |
b92a78e5 RG | 1327 | return head; |
1328 | ||
1329 | cleanup: | |
1330 | qtd_list_free(oxu, urb, head); | |
1331 | return NULL; | |
1332 | } | |
1333 | ||
1334 | /* Each QH holds a qtd list; a QH is used for everything except iso. | |
1335 | * | |
1336 | * For interrupt urbs, the scheduler must set the microframe scheduling | |
1337 | * mask(s) each time the QH gets scheduled. For highspeed, that's | |
1338 | * just one microframe in the s-mask. For split interrupt transactions | |
1339 | * there are additional complications: c-mask, maybe FSTNs. | |
1340 | */ | |
1341 | static struct ehci_qh *qh_make(struct oxu_hcd *oxu, | |
1342 | struct urb *urb, gfp_t flags) | |
1343 | { | |
1344 | struct ehci_qh *qh = oxu_qh_alloc(oxu); | |
1345 | u32 info1 = 0, info2 = 0; | |
1346 | int is_input, type; | |
1347 | int maxp = 0; | |
1348 | ||
1349 | if (!qh) | |
1350 | return qh; | |
1351 | ||
1352 | /* | |
1353 | * init endpoint/device data for this QH | |
1354 | */ | |
1355 | info1 |= usb_pipeendpoint(urb->pipe) << 8; | |
1356 | info1 |= usb_pipedevice(urb->pipe) << 0; | |
1357 | ||
1358 | is_input = usb_pipein(urb->pipe); | |
1359 | type = usb_pipetype(urb->pipe); | |
1360 | maxp = usb_maxpacket(urb->dev, urb->pipe, !is_input); | |
1361 | ||
1362 | /* Compute interrupt scheduling parameters just once, and save. | |
1363 | * - allowing for high bandwidth, how many nsec/uframe are used? | |
1364 | * - split transactions need a second CSPLIT uframe; same question | |
1365 | * - splits also need a schedule gap (for full/low speed I/O) | |
1366 | * - qh has a polling interval | |
1367 | * | |
1368 | * For control/bulk requests, the HC or TT handles these. | |
1369 | */ | |
1370 | if (type == PIPE_INTERRUPT) { | |
1371 | qh->usecs = NS_TO_US(usb_calc_bus_time(USB_SPEED_HIGH, | |
1372 | is_input, 0, | |
1373 | hb_mult(maxp) * max_packet(maxp))); | |
1374 | qh->start = NO_FRAME; | |
1375 | ||
1376 | if (urb->dev->speed == USB_SPEED_HIGH) { | |
1377 | qh->c_usecs = 0; | |
1378 | qh->gap_uf = 0; | |
1379 | ||
1380 | qh->period = urb->interval >> 3; | |
1381 | if (qh->period == 0 && urb->interval != 1) { | |
1382 | /* NOTE interval 2 or 4 uframes could work. | |
1383 | * But interval 1 scheduling is simpler, and | |
1384 | * includes high bandwidth. | |
1385 | */ | |
b5f5bfe0 GKH | 1386 | oxu_dbg(oxu, "intr period %d uframes, NYET!\n", |
1387 | urb->interval); | |
b92a78e5 RG | 1388 | goto done; |
1389 | } | |
1390 | } else { | |
1391 | struct usb_tt *tt = urb->dev->tt; | |
1392 | int think_time; | |
1393 | ||
1394 | /* gap is f(FS/LS transfer times) */ | |
1395 | qh->gap_uf = 1 + usb_calc_bus_time(urb->dev->speed, | |
1396 | is_input, 0, maxp) / (125 * 1000); | |
1397 | ||
1398 | /* FIXME this just approximates SPLIT/CSPLIT times */ | |
1399 | if (is_input) { /* SPLIT, gap, CSPLIT+DATA */ | |
1400 | qh->c_usecs = qh->usecs + HS_USECS(0); | |
1401 | qh->usecs = HS_USECS(1); | |
1402 | } else { /* SPLIT+DATA, gap, CSPLIT */ | |
1403 | qh->usecs += HS_USECS(1); | |
1404 | qh->c_usecs = HS_USECS(0); | |
1405 | } | |
1406 | ||
1407 | think_time = tt ? tt->think_time : 0; | |
1408 | qh->tt_usecs = NS_TO_US(think_time + | |
1409 | usb_calc_bus_time(urb->dev->speed, | |
1410 | is_input, 0, max_packet(maxp))); | |
1411 | qh->period = urb->interval; | |
1412 | } | |
1413 | } | |
1414 | ||
1415 | /* support for tt scheduling, and access to toggles */ | |
1416 | qh->dev = urb->dev; | |
1417 | ||
1418 | /* using TT? */ | |
1419 | switch (urb->dev->speed) { | |
1420 | case USB_SPEED_LOW: | |
1421 | info1 |= (1 << 12); /* EPS "low" */ | |
1422 | /* FALL THROUGH */ | |
1423 | ||
1424 | case USB_SPEED_FULL: | |
1425 | /* EPS 0 means "full" */ | |
1426 | if (type != PIPE_INTERRUPT) | |
1427 | info1 |= (EHCI_TUNE_RL_TT << 28); | |
1428 | if (type == PIPE_CONTROL) { | |
1429 | info1 |= (1 << 27); /* for TT */ | |
1430 | info1 |= 1 << 14; /* toggle from qtd */ | |
1431 | } | |
1432 | info1 |= maxp << 16; | |
1433 | ||
1434 | info2 |= (EHCI_TUNE_MULT_TT << 30); | |
1435 | info2 |= urb->dev->ttport << 23; | |
1436 | ||
1437 | /* NOTE: if (PIPE_INTERRUPT) { scheduler sets c-mask } */ | |
1438 | ||
1439 | break; | |
1440 | ||
1441 | case USB_SPEED_HIGH: /* no TT involved */ | |
1442 | info1 |= (2 << 12); /* EPS "high" */ | |
1443 | if (type == PIPE_CONTROL) { | |
1444 | info1 |= (EHCI_TUNE_RL_HS << 28); | |
1445 | info1 |= 64 << 16; /* usb2 fixed maxpacket */ | |
1446 | info1 |= 1 << 14; /* toggle from qtd */ | |
1447 | info2 |= (EHCI_TUNE_MULT_HS << 30); | |
1448 | } else if (type == PIPE_BULK) { | |
1449 | info1 |= (EHCI_TUNE_RL_HS << 28); | |
1450 | info1 |= 512 << 16; /* usb2 fixed maxpacket */ | |
1451 | info2 |= (EHCI_TUNE_MULT_HS << 30); | |
1452 | } else { /* PIPE_INTERRUPT */ | |
1453 | info1 |= max_packet(maxp) << 16; | |
1454 | info2 |= hb_mult(maxp) << 30; | |
1455 | } | |
1456 | break; | |
1457 | default: | |
b5f5bfe0 | 1458 | oxu_dbg(oxu, "bogus dev %p speed %d\n", urb->dev, urb->dev->speed); |
b92a78e5 RG | 1459 | done: |
1460 | qh_put(qh); | |
1461 | return NULL; | |
1462 | } | |
1463 | ||
1464 | /* NOTE: if (PIPE_INTERRUPT) { scheduler sets s-mask } */ | |
1465 | ||
1466 | /* init as live, toggle clear, advance to dummy */ | |
1467 | qh->qh_state = QH_STATE_IDLE; | |
1468 | qh->hw_info1 = cpu_to_le32(info1); | |
1469 | qh->hw_info2 = cpu_to_le32(info2); | |
1470 | usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), !is_input, 1); | |
1471 | qh_refresh(oxu, qh); | |
1472 | return qh; | |
1473 | } | |
1474 | ||
1475 | /* Move qh (and its qtds) onto async queue; maybe enable queue. | |
1476 | */ | |
1477 | static void qh_link_async(struct oxu_hcd *oxu, struct ehci_qh *qh) | |
1478 | { | |
1479 | __le32 dma = QH_NEXT(qh->qh_dma); | |
1480 | struct ehci_qh *head; | |
1481 | ||
1482 | /* (re)start the async schedule? */ | |
1483 | head = oxu->async; | |
1484 | timer_action_done(oxu, TIMER_ASYNC_OFF); | |
1485 | if (!head->qh_next.qh) { | |
1486 | u32 cmd = readl(&oxu->regs->command); | |
1487 | ||
1488 | if (!(cmd & CMD_ASE)) { | |
1489 | /* in case a clear of CMD_ASE didn't take yet */ | |
1490 | (void)handshake(oxu, &oxu->regs->status, | |
1491 | STS_ASS, 0, 150); | |
1492 | cmd |= CMD_ASE | CMD_RUN; | |
1493 | writel(cmd, &oxu->regs->command); | |
1494 | oxu_to_hcd(oxu)->state = HC_STATE_RUNNING; | |
1495 | /* posted write need not be known to HC yet ... */ | |
1496 | } | |
1497 | } | |
1498 | ||
1499 | /* clear halt and/or toggle; and maybe recover from silicon quirk */ | |
1500 | if (qh->qh_state == QH_STATE_IDLE) | |
1501 | qh_refresh(oxu, qh); | |
1502 | ||
1503 | /* splice right after start */ | |
1504 | qh->qh_next = head->qh_next; | |
1505 | qh->hw_next = head->hw_next; | |
1506 | wmb(); | |
1507 | ||
1508 | head->qh_next.qh = qh; | |
1509 | head->hw_next = dma; | |
1510 | ||
1511 | qh->qh_state = QH_STATE_LINKED; | |
1512 | /* qtd completions reported later by interrupt */ | |
1513 | } | |
1514 | ||
551509d2 | 1515 | #define QH_ADDR_MASK cpu_to_le32(0x7f) |
b92a78e5 RG | 1516 ||
1517 | /* | |
1518 | * For control/bulk/interrupt, return QH with these TDs appended. | |
1519 | * Allocates and initializes the QH if necessary. | |
1520 | * Returns null if it can't allocate a QH it needs to. | |
1521 | * If the QH has TDs (urbs) already, that's great. | |
1522 | */ | |
1523 | static struct ehci_qh *qh_append_tds(struct oxu_hcd *oxu, | |
1524 | struct urb *urb, struct list_head *qtd_list, | |
1525 | int epnum, void **ptr) | |
1526 | { | |
1527 | struct ehci_qh *qh = NULL; | |
1528 | ||
1529 | qh = (struct ehci_qh *) *ptr; | |
1530 | if (unlikely(qh == NULL)) { | |
1531 | /* can't sleep here, we have oxu->lock... */ | |
1532 | qh = qh_make(oxu, urb, GFP_ATOMIC); | |
1533 | *ptr = qh; | |
1534 | } | |
1535 | if (likely(qh != NULL)) { | |
1536 | struct ehci_qtd *qtd; | |
1537 | ||
1538 | if (unlikely(list_empty(qtd_list))) | |
1539 | qtd = NULL; | |
1540 | else | |
1541 | qtd = list_entry(qtd_list->next, struct ehci_qtd, | |
1542 | qtd_list); | |
1543 | ||
1544 | /* control qh may need patching ... */ | |
1545 | if (unlikely(epnum == 0)) { | |
1546 | ||
1547 | /* usb_reset_device() briefly reverts to address 0 */ | |
1548 | if (usb_pipedevice(urb->pipe) == 0) | |
1549 | qh->hw_info1 &= ~QH_ADDR_MASK; | |
1550 | } | |
1551 | ||
1552 | /* just one way to queue requests: swap with the dummy qtd. | |
1553 | * only hc or qh_refresh() ever modify the overlay. | |
1554 | */ | |
1555 | if (likely(qtd != NULL)) { | |
1556 | struct ehci_qtd *dummy; | |
1557 | dma_addr_t dma; | |
1558 | __le32 token; | |
1559 | ||
1560 | /* to avoid racing the HC, use the dummy td instead of | |
1561 | * the first td of our list (becomes new dummy). both | |
1562 | * tds stay deactivated until we're done, when the | |
1563 | * HC is allowed to fetch the old dummy (4.10.2). | |
1564 | */ | |
1565 | token = qtd->hw_token; | |
1566 | qtd->hw_token = HALT_BIT; | |
1567 | wmb(); | |
1568 | dummy = qh->dummy; | |
1569 | ||
1570 | dma = dummy->qtd_dma; | |
1571 | *dummy = *qtd; | |
1572 | dummy->qtd_dma = dma; | |
1573 | ||
1574 | list_del(&qtd->qtd_list); | |
1575 | list_add(&dummy->qtd_list, qtd_list); | |
1576 | list_splice(qtd_list, qh->qtd_list.prev); | |
1577 | ||
1578 | ehci_qtd_init(qtd, qtd->qtd_dma); | |
1579 | qh->dummy = qtd; | |
1580 | ||
1581 | /* hc must see the new dummy at list end */ | |
1582 | dma = qtd->qtd_dma; | |
1583 | qtd = list_entry(qh->qtd_list.prev, | |
1584 | struct ehci_qtd, qtd_list); | |
1585 | qtd->hw_next = QTD_NEXT(dma); | |
1586 | ||
1587 | /* let the hc process these next qtds */ | |
1588 | dummy->hw_token = (token & ~(0x80)); | |
1589 | wmb(); | |
1590 | dummy->hw_token = token; | |
1591 | ||
1592 | urb->hcpriv = qh_get(qh); | |
1593 | } | |
1594 | } | |
1595 | return qh; | |
1596 | } | |
1597 | ||
1598 | static int submit_async(struct oxu_hcd *oxu, struct urb *urb, | |
1599 | struct list_head *qtd_list, gfp_t mem_flags) | |
1600 | { | |
1601 | struct ehci_qtd *qtd; | |
1602 | int epnum; | |
1603 | unsigned long flags; | |
1604 | struct ehci_qh *qh = NULL; | |
1605 | int rc = 0; | |
1606 | ||
1607 | qtd = list_entry(qtd_list->next, struct ehci_qtd, qtd_list); | |
1608 | epnum = urb->ep->desc.bEndpointAddress; | |
1609 | ||
1610 | #ifdef OXU_URB_TRACE | |
1611 | oxu_dbg(oxu, "%s %s urb %p ep%d%s len %d, qtd %p [qh %p]\n", | |
1612 | __func__, urb->dev->devpath, urb, | |
1613 | epnum & 0x0f, (epnum & USB_DIR_IN) ? "in" : "out", | |
1614 | urb->transfer_buffer_length, | |
1615 | qtd, urb->ep->hcpriv); | |
1616 | #endif | |
1617 | ||
1618 | spin_lock_irqsave(&oxu->lock, flags); | |
541c7d43 | 1619 | if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) { |
b92a78e5 RG | 1620 | rc = -ESHUTDOWN; |
1621 | goto done; | |
1622 | } | |
1623 | ||
1624 | qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv); | |
1625 | if (unlikely(qh == NULL)) { | |
1626 | rc = -ENOMEM; | |
1627 | goto done; | |
1628 | } | |
1629 | ||
1630 | /* Control/bulk operations through TTs don't need scheduling, | |
1631 | * the HC and TT handle it when the TT has a buffer ready. | |
1632 | */ | |
1633 | if (likely(qh->qh_state == QH_STATE_IDLE)) | |
1634 | qh_link_async(oxu, qh_get(qh)); | |
1635 | done: | |
1636 | spin_unlock_irqrestore(&oxu->lock, flags); | |
1637 | if (unlikely(qh == NULL)) | |
1638 | qtd_list_free(oxu, urb, qtd_list); | |
1639 | return rc; | |
1640 | } | |
1641 | ||
1642 | /* The async qh for the qtds being reclaimed is now unlinked from the HC */ | |
1643 | ||
1644 | static void end_unlink_async(struct oxu_hcd *oxu) | |
1645 | { | |
1646 | struct ehci_qh *qh = oxu->reclaim; | |
1647 | struct ehci_qh *next; | |
1648 | ||
1649 | timer_action_done(oxu, TIMER_IAA_WATCHDOG); | |
1650 | ||
1651 | qh->qh_state = QH_STATE_IDLE; | |
1652 | qh->qh_next.qh = NULL; | |
1653 | qh_put(qh); /* refcount from reclaim */ | |
1654 | ||
1655 | /* other unlink(s) may be pending (in QH_STATE_UNLINK_WAIT) */ | |
1656 | next = qh->reclaim; | |
1657 | oxu->reclaim = next; | |
1658 | oxu->reclaim_ready = 0; | |
1659 | qh->reclaim = NULL; | |
1660 | ||
1661 | qh_completions(oxu, qh); | |
1662 | ||
1663 | if (!list_empty(&qh->qtd_list) | |
1664 | && HC_IS_RUNNING(oxu_to_hcd(oxu)->state)) | |
1665 | qh_link_async(oxu, qh); | |
1666 | else { | |
1667 | qh_put(qh); /* refcount from async list */ | |
1668 | ||
1669 | /* it's not free to turn the async schedule on/off; leave it | |
1670 | * active but idle for a while once it empties. | |
1671 | */ | |
1672 | if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state) | |
1673 | && oxu->async->qh_next.qh == NULL) | |
1674 | timer_action(oxu, TIMER_ASYNC_OFF); | |
1675 | } | |
1676 | ||
1677 | if (next) { | |
1678 | oxu->reclaim = NULL; | |
1679 | start_unlink_async(oxu, next); | |
1680 | } | |
1681 | } | |
1682 | ||
1683 | /* makes sure the async qh will become idle */ | |
1684 | /* caller must own oxu->lock */ | |
1685 | ||
1686 | static void start_unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh) | |
1687 | { | |
1688 | int cmd = readl(&oxu->regs->command); | |
1689 | struct ehci_qh *prev; | |
1690 | ||
1691 | #ifdef DEBUG | |
1692 | assert_spin_locked(&oxu->lock); | |
debe26af GB |
1693 | BUG_ON(oxu->reclaim || (qh->qh_state != QH_STATE_LINKED |
1694 | && qh->qh_state != QH_STATE_UNLINK_WAIT)); | |
b92a78e5 RG |
1695 | #endif |
1696 | ||
1697 | /* stop async schedule right now? */ | |
1698 | if (unlikely(qh == oxu->async)) { | |
1699 | /* can't get here without STS_ASS set */ | |
1700 | if (oxu_to_hcd(oxu)->state != HC_STATE_HALT | |
1701 | && !oxu->reclaim) { | |
1702 | /* ... and CMD_IAAD clear */ | |
1703 | writel(cmd & ~CMD_ASE, &oxu->regs->command); | |
1704 | wmb(); | |
1705 | /* handshake later, if we need to */ | |
1706 | timer_action_done(oxu, TIMER_ASYNC_OFF); | |
1707 | } | |
1708 | return; | |
1709 | } | |
1710 | ||
1711 | qh->qh_state = QH_STATE_UNLINK; | |
1712 | oxu->reclaim = qh = qh_get(qh); | |
1713 | ||
1714 | prev = oxu->async; | |
1715 | while (prev->qh_next.qh != qh) | |
1716 | prev = prev->qh_next.qh; | |
1717 | ||
1718 | prev->hw_next = qh->hw_next; | |
1719 | prev->qh_next = qh->qh_next; | |
1720 | wmb(); | |
1721 | ||
1722 | if (unlikely(oxu_to_hcd(oxu)->state == HC_STATE_HALT)) { | |
1723 | /* if (unlikely(qh->reclaim != 0)) | |
1724 | * this will recurse, probably not much | |
1725 | */ | |
1726 | end_unlink_async(oxu); | |
1727 | return; | |
1728 | } | |
1729 | ||
1730 | oxu->reclaim_ready = 0; | |
1731 | cmd |= CMD_IAAD; | |
1732 | writel(cmd, &oxu->regs->command); | |
1733 | (void) readl(&oxu->regs->command); | |
1734 | timer_action(oxu, TIMER_IAA_WATCHDOG); | |
1735 | } | |
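/* Rough outline of the async unlink handshake, as the code above and the
 * IRQ/watchdog paths below use it:
 *
 *   1. start_unlink_async() patches the qh out of the software and
 *      hardware async rings and sets CMD_IAAD, ringing the "Interrupt on
 *      Async Advance" doorbell.
 *   2. The controller raises STS_IAA once it no longer caches the
 *      unlinked qh; the IRQ handler (or oxu_watchdog() if the IRQ is
 *      lost) sets oxu->reclaim_ready.
 *   3. ehci_work() then calls end_unlink_async() to give back finished
 *      qtds and either relink the qh or drop the last reference.
 */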
1736 | ||
1737 | static void scan_async(struct oxu_hcd *oxu) | |
1738 | { | |
1739 | struct ehci_qh *qh; | |
1740 | enum ehci_timer_action action = TIMER_IO_WATCHDOG; | |
1741 | ||
1742 | if (!++(oxu->stamp)) | |
1743 | oxu->stamp++; | |
1744 | timer_action_done(oxu, TIMER_ASYNC_SHRINK); | |
1745 | rescan: | |
1746 | qh = oxu->async->qh_next.qh; | |
1747 | if (likely(qh != NULL)) { | |
1748 | do { | |
1749 | /* clean any finished work for this qh */ | |
1750 | if (!list_empty(&qh->qtd_list) | |
1751 | && qh->stamp != oxu->stamp) { | |
1752 | int temp; | |
1753 | ||
1754 | /* unlinks could happen here; completion | |
1755 | * reporting drops the lock. rescan using | |
1756 | * the latest schedule, but don't rescan | |
1757 | * qhs we already finished (no looping). | |
1758 | */ | |
1759 | qh = qh_get(qh); | |
1760 | qh->stamp = oxu->stamp; | |
1761 | temp = qh_completions(oxu, qh); | |
1762 | qh_put(qh); | |
1763 | if (temp != 0) | |
1764 | goto rescan; | |
1765 | } | |
1766 | ||
1767 | /* unlink idle entries, reducing HC PCI usage as well | |
1768 | * as HCD schedule-scanning costs. delay for any qh | |
1769 | * we just scanned, there's a not-unusual case that it | |
1770 | * doesn't stay idle for long. | |
1771 | * (plus, avoids some kind of re-activation race.) | |
1772 | */ | |
1773 | if (list_empty(&qh->qtd_list)) { | |
1774 | if (qh->stamp == oxu->stamp) | |
1775 | action = TIMER_ASYNC_SHRINK; | |
1776 | else if (!oxu->reclaim | |
1777 | && qh->qh_state == QH_STATE_LINKED) | |
1778 | start_unlink_async(oxu, qh); | |
1779 | } | |
1780 | ||
1781 | qh = qh->qh_next.qh; | |
1782 | } while (qh); | |
1783 | } | |
1784 | if (action == TIMER_ASYNC_SHRINK) | |
1785 | timer_action(oxu, TIMER_ASYNC_SHRINK); | |
1786 | } | |
1787 | ||
1788 | /* | |
1789 | * periodic_next_shadow - return "next" pointer on shadow list | |
1790 | * @periodic: host pointer to qh/itd/sitd | |
1791 | * @tag: hardware tag for type of this record | |
1792 | */ | |
1793 | static union ehci_shadow *periodic_next_shadow(union ehci_shadow *periodic, | |
1794 | __le32 tag) | |
1795 | { | |
1796 | switch (tag) { | |
1797 | default: | |
1798 | case Q_TYPE_QH: | |
1799 | return &periodic->qh->qh_next; | |
1800 | } | |
1801 | } | |
1802 | ||
1803 | /* caller must hold oxu->lock */ | |
1804 | static void periodic_unlink(struct oxu_hcd *oxu, unsigned frame, void *ptr) | |
1805 | { | |
1806 | union ehci_shadow *prev_p = &oxu->pshadow[frame]; | |
1807 | __le32 *hw_p = &oxu->periodic[frame]; | |
1808 | union ehci_shadow here = *prev_p; | |
1809 | ||
1810 | /* find predecessor of "ptr"; hw and shadow lists are in sync */ | |
1811 | while (here.ptr && here.ptr != ptr) { | |
1812 | prev_p = periodic_next_shadow(prev_p, Q_NEXT_TYPE(*hw_p)); | |
1813 | hw_p = here.hw_next; | |
1814 | here = *prev_p; | |
1815 | } | |
1816 | /* an interrupt entry (at list end) could have been shared */ | |
1817 | if (!here.ptr) | |
1818 | return; | |
1819 | ||
1820 | /* update shadow and hardware lists ... the old "next" pointers | |
1821 | * from ptr may still be in use, the caller updates them. | |
1822 | */ | |
1823 | *prev_p = *periodic_next_shadow(&here, Q_NEXT_TYPE(*hw_p)); | |
1824 | *hw_p = *here.hw_next; | |
1825 | } | |
1826 | ||
1827 | /* how many of the uframe's 125 usecs are allocated? */ | |
1828 | static unsigned short periodic_usecs(struct oxu_hcd *oxu, | |
1829 | unsigned frame, unsigned uframe) | |
1830 | { | |
1831 | __le32 *hw_p = &oxu->periodic[frame]; | |
1832 | union ehci_shadow *q = &oxu->pshadow[frame]; | |
1833 | unsigned usecs = 0; | |
1834 | ||
1835 | while (q->ptr) { | |
1836 | switch (Q_NEXT_TYPE(*hw_p)) { | |
1837 | case Q_TYPE_QH: | |
1838 | default: | |
1839 | /* is it in the S-mask? */ | |
1840 | if (q->qh->hw_info2 & cpu_to_le32(1 << uframe)) | |
1841 | usecs += q->qh->usecs; | |
1842 | /* ... or C-mask? */ | |
1843 | if (q->qh->hw_info2 & cpu_to_le32(1 << (8 + uframe))) | |
1844 | usecs += q->qh->c_usecs; | |
1845 | hw_p = &q->qh->hw_next; | |
1846 | q = &q->qh->qh_next; | |
1847 | break; | |
1848 | } | |
1849 | } | |
1850 | #ifdef DEBUG | |
1851 | if (usecs > 100) | |
1852 | oxu_err(oxu, "uframe %d sched overrun: %d usecs\n", | |
1853 | frame * 8 + uframe, usecs); | |
1854 | #endif | |
1855 | return usecs; | |
1856 | } | |
1857 | ||
1858 | static int enable_periodic(struct oxu_hcd *oxu) | |
1859 | { | |
1860 | u32 cmd; | |
1861 | int status; | |
1862 | ||
1863 | /* did clearing PSE take effect yet? | |
1864 | * takes effect only at frame boundaries... | |
1865 | */ | |
1866 | status = handshake(oxu, &oxu->regs->status, STS_PSS, 0, 9 * 125); | |
1867 | if (status != 0) { | |
1868 | oxu_to_hcd(oxu)->state = HC_STATE_HALT; | |
69fff59d | 1869 | usb_hc_died(oxu_to_hcd(oxu)); |
b92a78e5 RG |
1870 | return status; |
1871 | } | |
1872 | ||
1873 | cmd = readl(&oxu->regs->command) | CMD_PSE; | |
1874 | writel(cmd, &oxu->regs->command); | |
1875 | /* posted write ... PSS happens later */ | |
1876 | oxu_to_hcd(oxu)->state = HC_STATE_RUNNING; | |
1877 | ||
1878 | /* make sure ehci_work scans these */ | |
1879 | oxu->next_uframe = readl(&oxu->regs->frame_index) | |
1880 | % (oxu->periodic_size << 3); | |
1881 | return 0; | |
1882 | } | |
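/* Note on the 9 * 125 usec handshake timeout used here and in
 * disable_periodic() below: PSE changes only take effect at frame
 * boundaries, so waiting just over one full frame (8 uframes plus one for
 * slack) should be enough for STS_PSS to mirror CMD_PSE; if it doesn't,
 * the controller is treated as dead via usb_hc_died().
 */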
1883 | ||
1884 | static int disable_periodic(struct oxu_hcd *oxu) | |
1885 | { | |
1886 | u32 cmd; | |
1887 | int status; | |
1888 | ||
1889 | /* did setting PSE take effect yet? | |
1890 | * takes effect only at frame boundaries... | |
1891 | */ | |
1892 | status = handshake(oxu, &oxu->regs->status, STS_PSS, STS_PSS, 9 * 125); | |
1893 | if (status != 0) { | |
1894 | oxu_to_hcd(oxu)->state = HC_STATE_HALT; | |
69fff59d | 1895 | usb_hc_died(oxu_to_hcd(oxu)); |
b92a78e5 RG |
1896 | return status; |
1897 | } | |
1898 | ||
1899 | cmd = readl(&oxu->regs->command) & ~CMD_PSE; | |
1900 | writel(cmd, &oxu->regs->command); | |
1901 | /* posted write ... */ | |
1902 | ||
1903 | oxu->next_uframe = -1; | |
1904 | return 0; | |
1905 | } | |
1906 | ||
1907 | /* periodic schedule slots have iso tds (normal or split) first, then a | |
1908 | * sparse tree for active interrupt transfers. | |
1909 | * | |
1910 | * this just links in a qh; caller guarantees uframe masks are set right. | |
1911 | * no FSTN support (yet; oxu 0.96+) | |
1912 | */ | |
1913 | static int qh_link_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh) | |
1914 | { | |
1915 | unsigned i; | |
1916 | unsigned period = qh->period; | |
1917 | ||
1918 | dev_dbg(&qh->dev->dev, | |
1919 | "link qh%d-%04x/%p start %d [%d/%d us]\n", | |
1920 | period, le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK), | |
1921 | qh, qh->start, qh->usecs, qh->c_usecs); | |
1922 | ||
1923 | /* high bandwidth, or otherwise every microframe */ | |
1924 | if (period == 0) | |
1925 | period = 1; | |
1926 | ||
1927 | for (i = qh->start; i < oxu->periodic_size; i += period) { | |
1928 | union ehci_shadow *prev = &oxu->pshadow[i]; | |
1929 | __le32 *hw_p = &oxu->periodic[i]; | |
1930 | union ehci_shadow here = *prev; | |
1931 | __le32 type = 0; | |
1932 | ||
1933 | /* skip the iso nodes at list head */ | |
1934 | while (here.ptr) { | |
1935 | type = Q_NEXT_TYPE(*hw_p); | |
1936 | if (type == Q_TYPE_QH) | |
1937 | break; | |
1938 | prev = periodic_next_shadow(prev, type); | |
1939 | hw_p = &here.qh->hw_next; | |
1940 | here = *prev; | |
1941 | } | |
1942 | ||
1943 | /* sorting each branch by period (slow-->fast) | |
1944 | * enables sharing interior tree nodes | |
1945 | */ | |
1946 | while (here.ptr && qh != here.qh) { | |
1947 | if (qh->period > here.qh->period) | |
1948 | break; | |
1949 | prev = &here.qh->qh_next; | |
1950 | hw_p = &here.qh->hw_next; | |
1951 | here = *prev; | |
1952 | } | |
1953 | /* link in this qh, unless some earlier pass did that */ | |
1954 | if (qh != here.qh) { | |
1955 | qh->qh_next = here; | |
1956 | if (here.qh) | |
1957 | qh->hw_next = *hw_p; | |
1958 | wmb(); | |
1959 | prev->qh = qh; | |
1960 | *hw_p = QH_NEXT(qh->qh_dma); | |
1961 | } | |
1962 | } | |
1963 | qh->qh_state = QH_STATE_LINKED; | |
1964 | qh_get(qh); | |
1965 | ||
1966 | /* update per-qh bandwidth for usbfs */ | |
1967 | oxu_to_hcd(oxu)->self.bandwidth_allocated += qh->period | |
1968 | ? ((qh->usecs + qh->c_usecs) / qh->period) | |
1969 | : (qh->usecs * 8); | |
1970 | ||
1971 | /* maybe enable periodic schedule processing */ | |
1972 | if (!oxu->periodic_sched++) | |
1973 | return enable_periodic(oxu); | |
1974 | ||
1975 | return 0; | |
1976 | } | |
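/* Bandwidth bookkeeping above, with hypothetical numbers: a qh with
 * period 4 and usecs + c_usecs = 40 adds 40 / 4 = 10 usecs of average
 * per-frame load, while a period-0 ("every uframe") qh adds usecs * 8
 * because it runs in all eight uframes of each frame.  The same formula
 * is subtracted again in qh_unlink_periodic().
 */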
1977 | ||
1978 | static void qh_unlink_periodic(struct oxu_hcd *oxu, struct ehci_qh *qh) | |
1979 | { | |
1980 | unsigned i; | |
1981 | unsigned period; | |
1982 | ||
1983 | /* FIXME: | |
1984 | * IF this isn't high speed | |
1985 | * and this qh is active in the current uframe | |
1986 | * (and overlay token SplitXstate is false?) | |
1987 | * THEN | |
551509d2 | 1988 | * qh->hw_info1 |= cpu_to_le32(1 << 7 "ignore"); |
b92a78e5 RG |
1989 | */ |
1990 | ||
1991 | /* high bandwidth, or otherwise part of every microframe */ | |
1992 | period = qh->period; | |
1993 | if (period == 0) | |
1994 | period = 1; | |
1995 | ||
1996 | for (i = qh->start; i < oxu->periodic_size; i += period) | |
1997 | periodic_unlink(oxu, i, qh); | |
1998 | ||
1999 | /* update per-qh bandwidth for usbfs */ | |
2000 | oxu_to_hcd(oxu)->self.bandwidth_allocated -= qh->period | |
2001 | ? ((qh->usecs + qh->c_usecs) / qh->period) | |
2002 | : (qh->usecs * 8); | |
2003 | ||
2004 | dev_dbg(&qh->dev->dev, | |
2005 | "unlink qh%d-%04x/%p start %d [%d/%d us]\n", | |
2006 | qh->period, | |
2007 | le32_to_cpup(&qh->hw_info2) & (QH_CMASK | QH_SMASK), | |
2008 | qh, qh->start, qh->usecs, qh->c_usecs); | |
2009 | ||
2010 | /* qh->qh_next still "live" to HC */ | |
2011 | qh->qh_state = QH_STATE_UNLINK; | |
2012 | qh->qh_next.ptr = NULL; | |
2013 | qh_put(qh); | |
2014 | ||
2015 | /* maybe turn off periodic schedule */ | |
2016 | oxu->periodic_sched--; | |
2017 | if (!oxu->periodic_sched) | |
2018 | (void) disable_periodic(oxu); | |
2019 | } | |
2020 | ||
2021 | static void intr_deschedule(struct oxu_hcd *oxu, struct ehci_qh *qh) | |
2022 | { | |
2023 | unsigned wait; | |
2024 | ||
2025 | qh_unlink_periodic(oxu, qh); | |
2026 | ||
2027 | /* simple/paranoid: always delay, expecting the HC needs to read | |
2028 | * qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and | |
37ebb549 | 2029 | * expect hub_wq to clean up after any CSPLITs we won't issue. |
b92a78e5 RG |
2030 | * active high speed queues may need bigger delays... |
2031 | */ | |
2032 | if (list_empty(&qh->qtd_list) | |
551509d2 | 2033 | || (cpu_to_le32(QH_CMASK) & qh->hw_info2) != 0) |
b92a78e5 RG |
2034 | wait = 2; |
2035 | else | |
2036 | wait = 55; /* worst case: 3 * 1024 */ | |
2037 | ||
2038 | udelay(wait); | |
2039 | qh->qh_state = QH_STATE_IDLE; | |
2040 | qh->hw_next = EHCI_LIST_END; | |
2041 | wmb(); | |
2042 | } | |
2043 | ||
2044 | static int check_period(struct oxu_hcd *oxu, | |
2045 | unsigned frame, unsigned uframe, | |
2046 | unsigned period, unsigned usecs) | |
2047 | { | |
2048 | int claimed; | |
2049 | ||
2050 | /* complete split running into next frame? | |
2051 | * given FSTN support, we could sometimes check... | |
2052 | */ | |
2053 | if (uframe >= 8) | |
2054 | return 0; | |
2055 | ||
2056 | /* | |
2057 | * 80% periodic == 100 usec/uframe available | |
2058 | * convert "usecs we need" to "max already claimed" | |
2059 | */ | |
2060 | usecs = 100 - usecs; | |
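/* Worked example with hypothetical numbers: a qh needing 30 usecs per
 * uframe turns "usecs" into 100 - 30 = 70, so a candidate uframe passes
 * only while periodic_usecs() reports at most 70 usecs already claimed,
 * i.e. claimed + 30 <= 100 usecs of the 125 usec uframe (the 80% budget).
 */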
2061 | ||
2062 | /* we "know" 2 and 4 uframe intervals were rejected; so | |
2063 | * for period 0, check _every_ microframe in the schedule. | |
2064 | */ | |
2065 | if (unlikely(period == 0)) { | |
2066 | do { | |
2067 | for (uframe = 0; uframe < 7; uframe++) { | |
2068 | claimed = periodic_usecs(oxu, frame, uframe); | |
2069 | if (claimed > usecs) | |
2070 | return 0; | |
2071 | } | |
2072 | } while ((frame += 1) < oxu->periodic_size); | |
2073 | ||
2074 | /* just check the specified uframe, at that period */ | |
2075 | } else { | |
2076 | do { | |
2077 | claimed = periodic_usecs(oxu, frame, uframe); | |
2078 | if (claimed > usecs) | |
2079 | return 0; | |
2080 | } while ((frame += period) < oxu->periodic_size); | |
2081 | } | |
2082 | ||
2083 | return 1; | |
2084 | } | |
2085 | ||
2086 | static int check_intr_schedule(struct oxu_hcd *oxu, | |
2087 | unsigned frame, unsigned uframe, | |
2088 | const struct ehci_qh *qh, __le32 *c_maskp) | |
2089 | { | |
2090 | int retval = -ENOSPC; | |
2091 | ||
2092 | if (qh->c_usecs && uframe >= 6) /* FSTN territory? */ | |
2093 | goto done; | |
2094 | ||
2095 | if (!check_period(oxu, frame, uframe, qh->period, qh->usecs)) | |
2096 | goto done; | |
2097 | if (!qh->c_usecs) { | |
2098 | retval = 0; | |
2099 | *c_maskp = 0; | |
2100 | goto done; | |
2101 | } | |
2102 | ||
2103 | done: | |
2104 | return retval; | |
2105 | } | |
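/* Note: as written above, a qh that needs complete-split bandwidth
 * (c_usecs != 0) always falls through with -ENOSPC, so only transfers
 * whose S-mask slot fits without a C-mask ever get scheduled here.
 */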
2106 | ||
2107 | /* "first fit" scheduling policy used the first time through, | |
2108 | * or when the previous schedule slot can't be re-used. | |
2109 | */ | |
2110 | static int qh_schedule(struct oxu_hcd *oxu, struct ehci_qh *qh) | |
2111 | { | |
2112 | int status; | |
2113 | unsigned uframe; | |
2114 | __le32 c_mask; | |
2115 | unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */ | |
2116 | ||
2117 | qh_refresh(oxu, qh); | |
2118 | qh->hw_next = EHCI_LIST_END; | |
2119 | frame = qh->start; | |
2120 | ||
2121 | /* reuse the previous schedule slots, if we can */ | |
2122 | if (frame < qh->period) { | |
2123 | uframe = ffs(le32_to_cpup(&qh->hw_info2) & QH_SMASK); | |
2124 | status = check_intr_schedule(oxu, frame, --uframe, | |
2125 | qh, &c_mask); | |
2126 | } else { | |
2127 | uframe = 0; | |
2128 | c_mask = 0; | |
2129 | status = -ENOSPC; | |
2130 | } | |
2131 | ||
2132 | /* else scan the schedule to find a group of slots such that all | |
2133 | * uframes have enough periodic bandwidth available. | |
2134 | */ | |
2135 | if (status) { | |
2136 | /* "normal" case, uframing flexible except with splits */ | |
2137 | if (qh->period) { | |
2138 | frame = qh->period - 1; | |
2139 | do { | |
2140 | for (uframe = 0; uframe < 8; uframe++) { | |
2141 | status = check_intr_schedule(oxu, | |
2142 | frame, uframe, qh, | |
2143 | &c_mask); | |
2144 | if (status == 0) | |
2145 | break; | |
2146 | } | |
2147 | } while (status && frame--); | |
2148 | ||
2149 | /* qh->period == 0 means every uframe */ | |
2150 | } else { | |
2151 | frame = 0; | |
2152 | status = check_intr_schedule(oxu, 0, 0, qh, &c_mask); | |
2153 | } | |
2154 | if (status) | |
2155 | goto done; | |
2156 | qh->start = frame; | |
2157 | ||
2158 | /* reset S-frame and (maybe) C-frame masks */ | |
551509d2 | 2159 | qh->hw_info2 &= cpu_to_le32(~(QH_CMASK | QH_SMASK)); |
b92a78e5 RG |
2160 | qh->hw_info2 |= qh->period |
2161 | ? cpu_to_le32(1 << uframe) | |
551509d2 | 2162 | : cpu_to_le32(QH_SMASK); |
b92a78e5 RG |
2163 | qh->hw_info2 |= c_mask; |
2164 | } else | |
2165 | oxu_dbg(oxu, "reused qh %p schedule\n", qh); | |
2166 | ||
2167 | /* stuff into the periodic schedule */ | |
2168 | status = qh_link_periodic(oxu, qh); | |
2169 | done: | |
2170 | return status; | |
2171 | } | |
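/* With the "first fit" search above, a hypothetical period-8 interrupt qh
 * first tries start frame 7, probing uframes 0..7 for one with enough
 * spare bandwidth, then walks down toward frame 0 until
 * check_intr_schedule() succeeds; the winning uframe is written into the
 * S-mask (hw_info2) before the qh is linked.
 */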
2172 | ||
2173 | static int intr_submit(struct oxu_hcd *oxu, struct urb *urb, | |
2174 | struct list_head *qtd_list, gfp_t mem_flags) | |
2175 | { | |
2176 | unsigned epnum; | |
2177 | unsigned long flags; | |
2178 | struct ehci_qh *qh; | |
2179 | int status = 0; | |
2180 | struct list_head empty; | |
2181 | ||
2182 | /* get endpoint and transfer/schedule data */ | |
2183 | epnum = urb->ep->desc.bEndpointAddress; | |
2184 | ||
2185 | spin_lock_irqsave(&oxu->lock, flags); | |
2186 | ||
541c7d43 | 2187 | if (unlikely(!HCD_HW_ACCESSIBLE(oxu_to_hcd(oxu)))) { |
b92a78e5 RG |
2188 | status = -ESHUTDOWN; |
2189 | goto done; | |
2190 | } | |
2191 | ||
2192 | /* get qh and force any scheduling errors */ | |
2193 | INIT_LIST_HEAD(&empty); | |
2194 | qh = qh_append_tds(oxu, urb, &empty, epnum, &urb->ep->hcpriv); | |
2195 | if (qh == NULL) { | |
2196 | status = -ENOMEM; | |
2197 | goto done; | |
2198 | } | |
2199 | if (qh->qh_state == QH_STATE_IDLE) { | |
2200 | status = qh_schedule(oxu, qh); | |
2201 | if (status != 0) | |
2202 | goto done; | |
2203 | } | |
2204 | ||
2205 | /* then queue the urb's tds to the qh */ | |
2206 | qh = qh_append_tds(oxu, urb, qtd_list, epnum, &urb->ep->hcpriv); | |
2207 | BUG_ON(qh == NULL); | |
2208 | ||
2209 | /* ... update usbfs periodic stats */ | |
2210 | oxu_to_hcd(oxu)->self.bandwidth_int_reqs++; | |
2211 | ||
2212 | done: | |
2213 | spin_unlock_irqrestore(&oxu->lock, flags); | |
2214 | if (status) | |
2215 | qtd_list_free(oxu, urb, qtd_list); | |
2216 | ||
2217 | return status; | |
2218 | } | |
2219 | ||
2220 | static inline int itd_submit(struct oxu_hcd *oxu, struct urb *urb, | |
2221 | gfp_t mem_flags) | |
2222 | { | |
2223 | oxu_dbg(oxu, "iso support is missing!\n"); | |
2224 | return -ENOSYS; | |
2225 | } | |
2226 | ||
2227 | static inline int sitd_submit(struct oxu_hcd *oxu, struct urb *urb, | |
2228 | gfp_t mem_flags) | |
2229 | { | |
2230 | oxu_dbg(oxu, "split iso support is missing!\n"); | |
2231 | return -ENOSYS; | |
2232 | } | |
2233 | ||
2234 | static void scan_periodic(struct oxu_hcd *oxu) | |
2235 | { | |
2236 | unsigned frame, clock, now_uframe, mod; | |
2237 | unsigned modified; | |
2238 | ||
2239 | mod = oxu->periodic_size << 3; | |
2240 | ||
2241 | /* | |
2242 | * When running, scan from the last scan point up to "now"; | |
2243 | * else clean up by scanning everything that's left. | |
2244 | * Touches as few pages as possible: cache-friendly. | |
2245 | */ | |
2246 | now_uframe = oxu->next_uframe; | |
2247 | if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state)) | |
2248 | clock = readl(&oxu->regs->frame_index); | |
2249 | else | |
2250 | clock = now_uframe + mod - 1; | |
2251 | clock %= mod; | |
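/* Both next_uframe and frame_index count in uframes, so mod is the
 * schedule length in uframes (periodic_size frames * 8).  When the HC is
 * stopped, "clock" is set one uframe behind the scan point so the loop
 * below walks the whole schedule exactly once.
 */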
2252 | ||
2253 | for (;;) { | |
2254 | union ehci_shadow q, *q_p; | |
2255 | __le32 type, *hw_p; | |
b92a78e5 RG |
2256 | |
2257 | /* don't scan past the live uframe */ | |
2258 | frame = now_uframe >> 3; | |
8e9fd85c | 2259 | if (frame != (clock >> 3)) { |
b92a78e5 RG |
2260 | /* safe to scan the whole frame at once */ |
2261 | now_uframe |= 0x07; | |
b92a78e5 RG |
2262 | } |
2263 | ||
2264 | restart: | |
2265 | /* scan each element in frame's queue for completions */ | |
2266 | q_p = &oxu->pshadow[frame]; | |
2267 | hw_p = &oxu->periodic[frame]; | |
2268 | q.ptr = q_p->ptr; | |
2269 | type = Q_NEXT_TYPE(*hw_p); | |
2270 | modified = 0; | |
2271 | ||
2272 | while (q.ptr != NULL) { | |
2273 | union ehci_shadow temp; | |
b92a78e5 | 2274 | |
b92a78e5 RG |
2275 | switch (type) { |
2276 | case Q_TYPE_QH: | |
2277 | /* handle any completions */ | |
2278 | temp.qh = qh_get(q.qh); | |
2279 | type = Q_NEXT_TYPE(q.qh->hw_next); | |
2280 | q = q.qh->qh_next; | |
2281 | modified = qh_completions(oxu, temp.qh); | |
2282 | if (unlikely(list_empty(&temp.qh->qtd_list))) | |
2283 | intr_deschedule(oxu, temp.qh); | |
2284 | qh_put(temp.qh); | |
2285 | break; | |
2286 | default: | |
b5f5bfe0 | 2287 | oxu_dbg(oxu, "corrupt type %d frame %d shadow %p\n", |
b92a78e5 RG |
2288 | type, frame, q.ptr); |
2289 | q.ptr = NULL; | |
2290 | } | |
2291 | ||
2292 | /* assume completion callbacks modify the queue */ | |
2293 | if (unlikely(modified)) | |
2294 | goto restart; | |
2295 | } | |
2296 | ||
2297 | /* Stop when we catch up to the HC */ | |
2298 | ||
2299 | /* FIXME: this assumes we won't get lapped when | |
2300 | * latencies climb; that should be rare, but... | |
2301 | * detect it, and just go all the way around. | |
2302 | * FLR might help detect this case, so long as latencies | |
2303 | * don't exceed periodic_size msec (default 1.024 sec). | |
2304 | */ | |
2305 | ||
2306 | /* FIXME: likewise assumes HC doesn't halt mid-scan */ | |
2307 | ||
2308 | if (now_uframe == clock) { | |
2309 | unsigned now; | |
2310 | ||
2311 | if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state)) | |
2312 | break; | |
2313 | oxu->next_uframe = now_uframe; | |
2314 | now = readl(&oxu->regs->frame_index) % mod; | |
2315 | if (now_uframe == now) | |
2316 | break; | |
2317 | ||
2318 | /* rescan the rest of this frame, then ... */ | |
2319 | clock = now; | |
2320 | } else { | |
2321 | now_uframe++; | |
2322 | now_uframe %= mod; | |
2323 | } | |
2324 | } | |
2325 | } | |
2326 | ||
2327 | /* On some systems, leaving remote wakeup enabled prevents system shutdown. | |
2328 | * The firmware seems to think that powering off is a wakeup event! | |
2329 | * This routine turns off remote wakeup and everything else, on all ports. | |
2330 | */ | |
2331 | static void ehci_turn_off_all_ports(struct oxu_hcd *oxu) | |
2332 | { | |
2333 | int port = HCS_N_PORTS(oxu->hcs_params); | |
2334 | ||
2335 | while (port--) | |
2336 | writel(PORT_RWC_BITS, &oxu->regs->port_status[port]); | |
2337 | } | |
2338 | ||
2339 | static void ehci_port_power(struct oxu_hcd *oxu, int is_on) | |
2340 | { | |
2341 | unsigned port; | |
2342 | ||
2343 | if (!HCS_PPC(oxu->hcs_params)) | |
2344 | return; | |
2345 | ||
2346 | oxu_dbg(oxu, "...power%s ports...\n", is_on ? "up" : "down"); | |
2347 | for (port = HCS_N_PORTS(oxu->hcs_params); port > 0; ) | |
2348 | (void) oxu_hub_control(oxu_to_hcd(oxu), | |
2349 | is_on ? SetPortFeature : ClearPortFeature, | |
2350 | USB_PORT_FEAT_POWER, | |
2351 | port--, NULL, 0); | |
2352 | msleep(20); | |
2353 | } | |
2354 | ||
2355 | /* Called from some interrupts, timers, and so on. | |
2356 | * It calls driver completion functions, after dropping oxu->lock. | |
2357 | */ | |
2358 | static void ehci_work(struct oxu_hcd *oxu) | |
2359 | { | |
2360 | timer_action_done(oxu, TIMER_IO_WATCHDOG); | |
2361 | if (oxu->reclaim_ready) | |
2362 | end_unlink_async(oxu); | |
2363 | ||
2364 | /* another CPU may drop oxu->lock during a schedule scan while | |
2365 | * it reports urb completions. this flag guards against bogus | |
2366 | * attempts at re-entrant schedule scanning. | |
2367 | */ | |
2368 | if (oxu->scanning) | |
2369 | return; | |
2370 | oxu->scanning = 1; | |
2371 | scan_async(oxu); | |
2372 | if (oxu->next_uframe != -1) | |
2373 | scan_periodic(oxu); | |
2374 | oxu->scanning = 0; | |
2375 | ||
2376 | /* the IO watchdog guards against hardware or driver bugs that | |
2377 | * misplace IRQs, and should let us run completely without IRQs. | |
2378 | * such lossage has been observed on both VT6202 and VT8235. | |
2379 | */ | |
2380 | if (HC_IS_RUNNING(oxu_to_hcd(oxu)->state) && | |
2381 | (oxu->async->qh_next.ptr != NULL || | |
2382 | oxu->periodic_sched != 0)) | |
2383 | timer_action(oxu, TIMER_IO_WATCHDOG); | |
2384 | } | |
2385 | ||
2386 | static void unlink_async(struct oxu_hcd *oxu, struct ehci_qh *qh) | |
2387 | { | |
2388 | /* if we need to use IAA and it's busy, defer */ | |
2389 | if (qh->qh_state == QH_STATE_LINKED | |
2390 | && oxu->reclaim | |
2391 | && HC_IS_RUNNING(oxu_to_hcd(oxu)->state)) { | |
2392 | struct ehci_qh *last; | |
2393 | ||
2394 | for (last = oxu->reclaim; | |
2395 | last->reclaim; | |
2396 | last = last->reclaim) | |
2397 | continue; | |
2398 | qh->qh_state = QH_STATE_UNLINK_WAIT; | |
2399 | last->reclaim = qh; | |
2400 | ||
2401 | /* bypass IAA if the hc can't care */ | |
2402 | } else if (!HC_IS_RUNNING(oxu_to_hcd(oxu)->state) && oxu->reclaim) | |
2403 | end_unlink_async(oxu); | |
2404 | ||
2405 | /* something else might have unlinked the qh by now */ | |
2406 | if (qh->qh_state == QH_STATE_LINKED) | |
2407 | start_unlink_async(oxu, qh); | |
2408 | } | |
2409 | ||
2410 | /* | |
2411 | * USB host controller methods | |
2412 | */ | |
2413 | ||
2414 | static irqreturn_t oxu210_hcd_irq(struct usb_hcd *hcd) | |
2415 | { | |
2416 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2417 | u32 status, pcd_status = 0; | |
2418 | int bh; | |
2419 | ||
2420 | spin_lock(&oxu->lock); | |
2421 | ||
2422 | status = readl(&oxu->regs->status); | |
2423 | ||
2424 | /* e.g. cardbus physical eject */ | |
2425 | if (status == ~(u32) 0) { | |
2426 | oxu_dbg(oxu, "device removed\n"); | |
2427 | goto dead; | |
2428 | } | |
2429 | ||
69fff59d | 2430 | /* Shared IRQ? */ |
b92a78e5 | 2431 | status &= INTR_MASK; |
69fff59d | 2432 | if (!status || unlikely(hcd->state == HC_STATE_HALT)) { |
b92a78e5 RG |
2433 | spin_unlock(&oxu->lock); |
2434 | return IRQ_NONE; | |
2435 | } | |
2436 | ||
2437 | /* clear (just) interrupts */ | |
2438 | writel(status, &oxu->regs->status); | |
2439 | readl(&oxu->regs->command); /* unblock posted write */ | |
2440 | bh = 0; | |
2441 | ||
2442 | #ifdef OXU_VERBOSE_DEBUG | |
2443 | /* unrequested/ignored: Frame List Rollover */ | |
2444 | dbg_status(oxu, "irq", status); | |
2445 | #endif | |
2446 | ||
2447 | /* INT, ERR, and IAA interrupt rates can be throttled */ | |
2448 | ||
2449 | /* normal [4.15.1.2] or error [4.15.1.1] completion */ | |
2450 | if (likely((status & (STS_INT|STS_ERR)) != 0)) | |
2451 | bh = 1; | |
2452 | ||
2453 | /* complete the unlinking of some qh [4.15.2.3] */ | |
2454 | if (status & STS_IAA) { | |
2455 | oxu->reclaim_ready = 1; | |
2456 | bh = 1; | |
2457 | } | |
2458 | ||
2459 | /* remote wakeup [4.3.1] */ | |
2460 | if (status & STS_PCD) { | |
2461 | unsigned i = HCS_N_PORTS(oxu->hcs_params); | |
2462 | pcd_status = status; | |
2463 | ||
2464 | /* resume root hub? */ | |
2465 | if (!(readl(&oxu->regs->command) & CMD_RUN)) | |
2466 | usb_hcd_resume_root_hub(hcd); | |
2467 | ||
2468 | while (i--) { | |
2469 | int pstatus = readl(&oxu->regs->port_status[i]); | |
2470 | ||
2471 | if (pstatus & PORT_OWNER) | |
2472 | continue; | |
2473 | if (!(pstatus & PORT_RESUME) | |
2474 | || oxu->reset_done[i] != 0) | |
2475 | continue; | |
2476 | ||
84c0d178 FB |
2477 | /* start USB_RESUME_TIMEOUT resume signaling from this |
2478 | * port, and make hub_wq collect PORT_STAT_C_SUSPEND to | |
b92a78e5 RG |
2479 | * stop that signaling. |
2480 | */ | |
84c0d178 FB |
2481 | oxu->reset_done[i] = jiffies + |
2482 | msecs_to_jiffies(USB_RESUME_TIMEOUT); | |
b92a78e5 RG |
2483 | oxu_dbg(oxu, "port %d remote wakeup\n", i + 1); |
2484 | mod_timer(&hcd->rh_timer, oxu->reset_done[i]); | |
2485 | } | |
2486 | } | |
2487 | ||
2488 | /* PCI errors [4.15.2.4] */ | |
2489 | if (unlikely((status & STS_FATAL) != 0)) { | |
2490 | /* bogus "fatal" IRQs appear on some chips... why? */ | |
2491 | status = readl(&oxu->regs->status); | |
2492 | dbg_cmd(oxu, "fatal", readl(&oxu->regs->command)); | |
2493 | dbg_status(oxu, "fatal", status); | |
2494 | if (status & STS_HALT) { | |
2495 | oxu_err(oxu, "fatal error\n"); | |
2496 | dead: | |
2497 | ehci_reset(oxu); | |
2498 | writel(0, &oxu->regs->configured_flag); | |
69fff59d | 2499 | usb_hc_died(hcd); |
b92a78e5 RG |
2500 | /* generic layer kills/unlinks all urbs, then |
2501 | * uses oxu_stop to clean up the rest | |
2502 | */ | |
2503 | bh = 1; | |
2504 | } | |
2505 | } | |
2506 | ||
2507 | if (bh) | |
2508 | ehci_work(oxu); | |
2509 | spin_unlock(&oxu->lock); | |
2510 | if (pcd_status & STS_PCD) | |
2511 | usb_hcd_poll_rh_status(hcd); | |
2512 | return IRQ_HANDLED; | |
2513 | } | |
2514 | ||
2515 | static irqreturn_t oxu_irq(struct usb_hcd *hcd) | |
2516 | { | |
2517 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2518 | int ret = IRQ_HANDLED; | |
2519 | ||
2520 | u32 status = oxu_readl(hcd->regs, OXU_CHIPIRQSTATUS); | |
2521 | u32 enable = oxu_readl(hcd->regs, OXU_CHIPIRQEN_SET); | |
2522 | ||
2523 | /* Disable all interrupts */ | |
2524 | oxu_writel(hcd->regs, OXU_CHIPIRQEN_CLR, enable); | |
2525 | ||
2526 | if ((oxu->is_otg && (status & OXU_USBOTGI)) || | |
2527 | (!oxu->is_otg && (status & OXU_USBSPHI))) | |
2528 | oxu210_hcd_irq(hcd); | |
2529 | else | |
2530 | ret = IRQ_NONE; | |
2531 | ||
2532 | /* Re-enable all interrupts */ | |
2533 | oxu_writel(hcd->regs, OXU_CHIPIRQEN_SET, enable); | |
2534 | ||
2535 | return ret; | |
2536 | } | |
2537 | ||
e99e88a9 | 2538 | static void oxu_watchdog(struct timer_list *t) |
b92a78e5 | 2539 | { |
e99e88a9 | 2540 | struct oxu_hcd *oxu = from_timer(oxu, t, watchdog); |
b92a78e5 RG |
2541 | unsigned long flags; |
2542 | ||
2543 | spin_lock_irqsave(&oxu->lock, flags); | |
2544 | ||
2545 | /* lost IAA irqs wedge things badly; seen with a vt8235 */ | |
2546 | if (oxu->reclaim) { | |
2547 | u32 status = readl(&oxu->regs->status); | |
2548 | if (status & STS_IAA) { | |
2549 | oxu_vdbg(oxu, "lost IAA\n"); | |
2550 | writel(STS_IAA, &oxu->regs->status); | |
2551 | oxu->reclaim_ready = 1; | |
2552 | } | |
2553 | } | |
2554 | ||
2555 | /* stop async processing after it's idled a bit */ | |
2556 | if (test_bit(TIMER_ASYNC_OFF, &oxu->actions)) | |
2557 | start_unlink_async(oxu, oxu->async); | |
2558 | ||
2559 | /* oxu could run by timer, without IRQs ... */ | |
2560 | ehci_work(oxu); | |
2561 | ||
2562 | spin_unlock_irqrestore(&oxu->lock, flags); | |
2563 | } | |
2564 | ||
2565 | /* One-time init, only for memory state. | |
2566 | */ | |
2567 | static int oxu_hcd_init(struct usb_hcd *hcd) | |
2568 | { | |
2569 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2570 | u32 temp; | |
2571 | int retval; | |
2572 | u32 hcc_params; | |
2573 | ||
2574 | spin_lock_init(&oxu->lock); | |
2575 | ||
e99e88a9 | 2576 | timer_setup(&oxu->watchdog, oxu_watchdog, 0); |
b92a78e5 RG |
2577 | |
2578 | /* | |
2579 | * hw default: 1K periodic list heads, one per frame. | |
2580 | * periodic_size can shrink by USBCMD update if hcc_params allows. | |
2581 | */ | |
2582 | oxu->periodic_size = DEFAULT_I_TDPS; | |
2583 | retval = ehci_mem_init(oxu, GFP_KERNEL); | |
2584 | if (retval < 0) | |
2585 | return retval; | |
2586 | ||
2587 | /* controllers may cache some of the periodic schedule ... */ | |
2588 | hcc_params = readl(&oxu->caps->hcc_params); | |
2589 | if (HCC_ISOC_CACHE(hcc_params)) /* full frame cache */ | |
2590 | oxu->i_thresh = 8; | |
2591 | else /* N microframes cached */ | |
2592 | oxu->i_thresh = 2 + HCC_ISOC_THRES(hcc_params); | |
2593 | ||
2594 | oxu->reclaim = NULL; | |
2595 | oxu->reclaim_ready = 0; | |
2596 | oxu->next_uframe = -1; | |
2597 | ||
2598 | /* | |
2599 | * dedicate a qh for the async ring head, since we couldn't unlink | |
2600 | * a 'real' qh without stopping the async schedule [4.8]. use it | |
2601 | * as the 'reclamation list head' too. | |
2602 | * its dummy is used in hw_alt_next of many tds, to prevent the qh | |
2603 | * from automatically advancing to the next td after short reads. | |
2604 | */ | |
2605 | oxu->async->qh_next.qh = NULL; | |
2606 | oxu->async->hw_next = QH_NEXT(oxu->async->qh_dma); | |
2607 | oxu->async->hw_info1 = cpu_to_le32(QH_HEAD); | |
2608 | oxu->async->hw_token = cpu_to_le32(QTD_STS_HALT); | |
2609 | oxu->async->hw_qtd_next = EHCI_LIST_END; | |
2610 | oxu->async->qh_state = QH_STATE_LINKED; | |
2611 | oxu->async->hw_alt_next = QTD_NEXT(oxu->async->dummy->qtd_dma); | |
2612 | ||
2613 | /* clear interrupt enables, set irq latency */ | |
2614 | if (log2_irq_thresh < 0 || log2_irq_thresh > 6) | |
2615 | log2_irq_thresh = 0; | |
2616 | temp = 1 << (16 + log2_irq_thresh); | |
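/* i.e. USBCMD bits 23:16 (the EHCI interrupt threshold field) get
 * 1 << log2_irq_thresh, selecting 1, 2, 4, ... 64 uframes
 * (125 usec ... 8 ms) between interrupt opportunities.
 */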
2617 | if (HCC_CANPARK(hcc_params)) { | |
2618 | /* HW default park == 3, on hardware that supports it (like | |
2619 | * NVidia and ALI silicon), maximizes throughput on the async | |
2620 | * schedule by avoiding QH fetches between transfers. | |
2621 | * | |
2622 | * With fast usb storage devices and NForce2, "park" seems to | |
2623 | * make problems: throughput reduction (!), data errors... | |
2624 | */ | |
2625 | if (park) { | |
2626 | park = min(park, (unsigned) 3); | |
2627 | temp |= CMD_PARK; | |
2628 | temp |= park << 8; | |
2629 | } | |
2630 | oxu_dbg(oxu, "park %d\n", park); | |
2631 | } | |
2632 | if (HCC_PGM_FRAMELISTLEN(hcc_params)) { | |
2633 | /* periodic schedule size can be smaller than default */ | |
2634 | temp &= ~(3 << 2); | |
2635 | temp |= (EHCI_TUNE_FLS << 2); | |
2636 | } | |
2637 | oxu->command = temp; | |
2638 | ||
2639 | return 0; | |
2640 | } | |
2641 | ||
2642 | /* Called during probe() after chip reset completes. | |
2643 | */ | |
2644 | static int oxu_reset(struct usb_hcd *hcd) | |
2645 | { | |
2646 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
b92a78e5 RG |
2647 | |
2648 | spin_lock_init(&oxu->mem_lock); | |
2649 | INIT_LIST_HEAD(&oxu->urb_list); | |
2650 | oxu->urb_len = 0; | |
2651 | ||
2652 | /* FIXME */ | |
a9f8ec4d | 2653 | hcd->self.controller->dma_mask = NULL; |
b92a78e5 RG |
2654 | |
2655 | if (oxu->is_otg) { | |
2656 | oxu->caps = hcd->regs + OXU_OTG_CAP_OFFSET; | |
2657 | oxu->regs = hcd->regs + OXU_OTG_CAP_OFFSET + \ | |
2658 | HC_LENGTH(readl(&oxu->caps->hc_capbase)); | |
2659 | ||
2660 | oxu->mem = hcd->regs + OXU_SPH_MEM; | |
2661 | } else { | |
2662 | oxu->caps = hcd->regs + OXU_SPH_CAP_OFFSET; | |
2663 | oxu->regs = hcd->regs + OXU_SPH_CAP_OFFSET + \ | |
2664 | HC_LENGTH(readl(&oxu->caps->hc_capbase)); | |
2665 | ||
2666 | oxu->mem = hcd->regs + OXU_OTG_MEM; | |
2667 | } | |
2668 | ||
2669 | oxu->hcs_params = readl(&oxu->caps->hcs_params); | |
2670 | oxu->sbrn = 0x20; | |
2671 | ||
aa31a090 | 2672 | return oxu_hcd_init(hcd); |
b92a78e5 RG |
2673 | } |
2674 | ||
2675 | static int oxu_run(struct usb_hcd *hcd) | |
2676 | { | |
2677 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2678 | int retval; | |
2679 | u32 temp, hcc_params; | |
2680 | ||
2681 | hcd->uses_new_polling = 1; | |
b92a78e5 RG |
2682 | |
2683 | /* EHCI spec section 4.1 */ | |
2684 | retval = ehci_reset(oxu); | |
2685 | if (retval != 0) { | |
2686 | ehci_mem_cleanup(oxu); | |
2687 | return retval; | |
2688 | } | |
2689 | writel(oxu->periodic_dma, &oxu->regs->frame_list); | |
2690 | writel((u32) oxu->async->qh_dma, &oxu->regs->async_next); | |
2691 | ||
2692 | /* hcc_params controls whether oxu->regs->segment must (!!!) | |
2693 | * be used; it constrains QH/ITD/SITD and QTD locations. | |
324c54fa | 2694 | * dma_pool consistent memory always uses segment zero. |
b92a78e5 RG |
2695 | * streaming mappings for I/O buffers, like pci_map_single(), |
2696 | * can return segments above 4GB, if the device allows. | |
2697 | * | |
0e77acef | 2698 | * NOTE: the dma mask is visible through dev->dma_mask, so |
b92a78e5 RG |
2699 | * drivers can pass this info along ... like NETIF_F_HIGHDMA, |
2700 | * Scsi_Host.highmem_io, and so forth. It's readonly to all | |
2701 | * host side drivers though. | |
2702 | */ | |
2703 | hcc_params = readl(&oxu->caps->hcc_params); | |
2704 | if (HCC_64BIT_ADDR(hcc_params)) | |
2705 | writel(0, &oxu->regs->segment); | |
2706 | ||
2707 | oxu->command &= ~(CMD_LRESET | CMD_IAAD | CMD_PSE | | |
2708 | CMD_ASE | CMD_RESET); | |
2709 | oxu->command |= CMD_RUN; | |
2710 | writel(oxu->command, &oxu->regs->command); | |
2711 | dbg_cmd(oxu, "init", oxu->command); | |
2712 | ||
2713 | /* | |
2714 | * Start, enabling full USB 2.0 functionality ... usb 1.1 devices | |
2715 | * are explicitly handed to companion controller(s), so no TT is | |
2716 | * involved with the root hub. (Except where one is integrated, | |
2717 | * and there's no companion controller unless maybe for USB OTG.) | |
2718 | */ | |
2719 | hcd->state = HC_STATE_RUNNING; | |
2720 | writel(FLAG_CF, &oxu->regs->configured_flag); | |
2721 | readl(&oxu->regs->command); /* unblock posted writes */ | |
2722 | ||
2723 | temp = HC_VERSION(readl(&oxu->caps->hc_capbase)); | |
2724 | oxu_info(oxu, "USB %x.%x started, quasi-EHCI %x.%02x, driver %s%s\n", | |
2725 | ((oxu->sbrn & 0xf0)>>4), (oxu->sbrn & 0x0f), | |
2726 | temp >> 8, temp & 0xff, DRIVER_VERSION, | |
2727 | ignore_oc ? ", overcurrent ignored" : ""); | |
2728 | ||
2729 | writel(INTR_MASK, &oxu->regs->intr_enable); /* Turn On Interrupts */ | |
2730 | ||
2731 | return 0; | |
2732 | } | |
2733 | ||
2734 | static void oxu_stop(struct usb_hcd *hcd) | |
2735 | { | |
2736 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2737 | ||
2738 | /* Turn off port power on all root hub ports. */ | |
2739 | ehci_port_power(oxu, 0); | |
2740 | ||
2741 | /* no more interrupts ... */ | |
2742 | del_timer_sync(&oxu->watchdog); | |
2743 | ||
2744 | spin_lock_irq(&oxu->lock); | |
2745 | if (HC_IS_RUNNING(hcd->state)) | |
2746 | ehci_quiesce(oxu); | |
2747 | ||
2748 | ehci_reset(oxu); | |
2749 | writel(0, &oxu->regs->intr_enable); | |
2750 | spin_unlock_irq(&oxu->lock); | |
2751 | ||
2752 | /* let companion controllers work when we aren't */ | |
2753 | writel(0, &oxu->regs->configured_flag); | |
2754 | ||
2755 | /* root hub is shut down separately (first, when possible) */ | |
2756 | spin_lock_irq(&oxu->lock); | |
2757 | if (oxu->async) | |
2758 | ehci_work(oxu); | |
2759 | spin_unlock_irq(&oxu->lock); | |
2760 | ehci_mem_cleanup(oxu); | |
2761 | ||
2762 | dbg_status(oxu, "oxu_stop completed", readl(&oxu->regs->status)); | |
2763 | } | |
2764 | ||
2765 | /* Kick in for silicon on any bus (not just pci, etc). | |
2766 | * This forcibly disables dma and IRQs, helping kexec and other cases | |
2767 | * where the next system software may expect clean state. | |
2768 | */ | |
2769 | static void oxu_shutdown(struct usb_hcd *hcd) | |
2770 | { | |
2771 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2772 | ||
2773 | (void) ehci_halt(oxu); | |
2774 | ehci_turn_off_all_ports(oxu); | |
2775 | ||
2776 | /* make BIOS/etc use companion controller during reboot */ | |
2777 | writel(0, &oxu->regs->configured_flag); | |
2778 | ||
2779 | /* unblock posted writes */ | |
2780 | readl(&oxu->regs->configured_flag); | |
2781 | } | |
2782 | ||
2783 | /* Non-error returns are a promise to giveback() the urb later | |
2784 | * we drop ownership so next owner (or urb unlink) can get it | |
2785 | * | |
2786 | * urb + dev is in hcd.self.controller.urb_list | |
2787 | * we're queueing TDs onto software and hardware lists | |
2788 | * | |
2789 | * hcd-specific init for hcpriv hasn't been done yet | |
2790 | * | |
2791 | * NOTE: control, bulk, and interrupt share the same code to append TDs | |
2792 | * to a (possibly active) QH, and the same QH scanning code. | |
2793 | */ | |
2794 | static int __oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, | |
2795 | gfp_t mem_flags) | |
2796 | { | |
2797 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2798 | struct list_head qtd_list; | |
2799 | ||
2800 | INIT_LIST_HEAD(&qtd_list); | |
2801 | ||
2802 | switch (usb_pipetype(urb->pipe)) { | |
2803 | case PIPE_CONTROL: | |
2804 | case PIPE_BULK: | |
2805 | default: | |
2806 | if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags)) | |
2807 | return -ENOMEM; | |
2808 | return submit_async(oxu, urb, &qtd_list, mem_flags); | |
2809 | ||
2810 | case PIPE_INTERRUPT: | |
2811 | if (!qh_urb_transaction(oxu, urb, &qtd_list, mem_flags)) | |
2812 | return -ENOMEM; | |
2813 | return intr_submit(oxu, urb, &qtd_list, mem_flags); | |
2814 | ||
2815 | case PIPE_ISOCHRONOUS: | |
2816 | if (urb->dev->speed == USB_SPEED_HIGH) | |
2817 | return itd_submit(oxu, urb, mem_flags); | |
2818 | else | |
2819 | return sitd_submit(oxu, urb, mem_flags); | |
2820 | } | |
2821 | } | |
2822 | ||
2823 | /* This function is responsible for breaking URBs with a large transfer | |
2824 | * buffer into smaller chunks and processing the small urbs in sequence. | |
2825 | */ | |
2826 | static int oxu_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, | |
2827 | gfp_t mem_flags) | |
2828 | { | |
2829 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2830 | int num, rem; | |
b92a78e5 RG |
2831 | void *transfer_buffer; |
2832 | struct urb *murb; | |
2833 | int i, ret; | |
2834 | ||
2835 | /* If not bulk pipe just enqueue the URB */ | |
2836 | if (!usb_pipebulk(urb->pipe)) | |
2837 | return __oxu_urb_enqueue(hcd, urb, mem_flags); | |
2838 | ||
2839 | /* Otherwise we should verify the USB transfer buffer size! */ | |
2840 | transfer_buffer = urb->transfer_buffer; | |
b92a78e5 RG |
2841 | |
2842 | num = urb->transfer_buffer_length / 4096; | |
2843 | rem = urb->transfer_buffer_length % 4096; | |
2844 | if (rem != 0) | |
2845 | num++; | |
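/* Example of the split arithmetic for a hypothetical 10000 byte bulk
 * transfer: num = 10000 / 4096 = 2 and rem = 1808, so num becomes 3 and
 * the URB is queued as two full 4096 byte micro URBs plus a final 1808
 * byte one (the "last" micro URB handled separately below).
 */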
2846 | ||
2847 | /* If the URB fits in a single 4096 byte chunk just enqueue it! */ | |
2848 | if (num == 1) | |
2849 | return __oxu_urb_enqueue(hcd, urb, mem_flags); | |
2850 | ||
2851 | /* Ok, we have more work to do! :) */ | |
2852 | ||
2853 | for (i = 0; i < num - 1; i++) { | |
25985edc | 2854 | /* Get a free micro URB; poll until one is available */ | |
b92a78e5 RG |
2855 | |
2856 | do { | |
2857 | murb = (struct urb *) oxu_murb_alloc(oxu); | |
2858 | if (!murb) | |
2859 | schedule(); | |
2860 | } while (!murb); | |
2861 | ||
2862 | /* Copy the urb */ | |
2863 | memcpy(murb, urb, sizeof(struct urb)); | |
2864 | ||
2865 | murb->transfer_buffer_length = 4096; | |
2866 | murb->transfer_buffer = transfer_buffer + i * 4096; | |
2867 | ||
2868 | /* A NULL complete pointer encodes that this is a micro urb */ | |
2869 | murb->complete = NULL; | |
2870 | ||
2871 | ((struct oxu_murb *) murb)->main = urb; | |
2872 | ((struct oxu_murb *) murb)->last = 0; | |
2873 | ||
2874 | /* This loop guarantees that the urb eventually gets processed, by | |
2875 | * retrying whenever there aren't enough resources at a given time. | |
2876 | */ | |
2877 | do { | |
2878 | ret = __oxu_urb_enqueue(hcd, murb, mem_flags); | |
2879 | if (ret) | |
2880 | schedule(); | |
2881 | } while (ret); | |
2882 | } | |
2883 | ||
2884 | /* Last urb requires special handling */ | |
2885 | ||
25985edc | 2886 | /* Get a free micro URB; poll until one is available */ | |
b92a78e5 RG |
2887 | do { |
2888 | murb = (struct urb *) oxu_murb_alloc(oxu); | |
2889 | if (!murb) | |
2890 | schedule(); | |
2891 | } while (!murb); | |
2892 | ||
2894 | /* Copy the urb */ | |
2894 | memcpy(murb, urb, sizeof(struct urb)); | |
2895 | ||
2896 | murb->transfer_buffer_length = rem > 0 ? rem : 4096; | |
2897 | murb->transfer_buffer = transfer_buffer + (num - 1) * 4096; | |
2898 | ||
2899 | /* A NULL complete pointer encodes that this is a micro urb */ | |
2900 | murb->complete = NULL; | |
2901 | ||
2902 | ((struct oxu_murb *) murb)->main = urb; | |
2903 | ((struct oxu_murb *) murb)->last = 1; | |
2904 | ||
2905 | do { | |
2906 | ret = __oxu_urb_enqueue(hcd, murb, mem_flags); | |
2907 | if (ret) | |
2908 | schedule(); | |
2909 | } while (ret); | |
2910 | ||
2911 | return ret; | |
2912 | } | |
2913 | ||
2914 | /* Remove from hardware lists. | |
2915 | * Completions normally happen asynchronously | |
2916 | */ | |
2917 | static int oxu_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) | |
2918 | { | |
2919 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2920 | struct ehci_qh *qh; | |
2921 | unsigned long flags; | |
2922 | ||
2923 | spin_lock_irqsave(&oxu->lock, flags); | |
2924 | switch (usb_pipetype(urb->pipe)) { | |
2925 | case PIPE_CONTROL: | |
2926 | case PIPE_BULK: | |
2927 | default: | |
2928 | qh = (struct ehci_qh *) urb->hcpriv; | |
2929 | if (!qh) | |
2930 | break; | |
2931 | unlink_async(oxu, qh); | |
2932 | break; | |
2933 | ||
2934 | case PIPE_INTERRUPT: | |
2935 | qh = (struct ehci_qh *) urb->hcpriv; | |
2936 | if (!qh) | |
2937 | break; | |
2938 | switch (qh->qh_state) { | |
2939 | case QH_STATE_LINKED: | |
2940 | intr_deschedule(oxu, qh); | |
2941 | /* FALL THROUGH */ | |
2942 | case QH_STATE_IDLE: | |
2943 | qh_completions(oxu, qh); | |
2944 | break; | |
2945 | default: | |
2946 | oxu_dbg(oxu, "bogus qh %p state %d\n", | |
2947 | qh, qh->qh_state); | |
2948 | goto done; | |
2949 | } | |
2950 | ||
2951 | /* reschedule QH iff another request is queued */ | |
2952 | if (!list_empty(&qh->qtd_list) | |
2953 | && HC_IS_RUNNING(hcd->state)) { | |
2954 | int status; | |
2955 | ||
2956 | status = qh_schedule(oxu, qh); | |
2957 | spin_unlock_irqrestore(&oxu->lock, flags); | |
2958 | ||
2959 | if (status != 0) { | |
2960 | /* shouldn't happen often, but ... | |
2961 | * FIXME kill those tds' urbs | |
2962 | */ | |
68980793 GKH |
2963 | dev_err(hcd->self.controller, |
2964 | "can't reschedule qh %p, err %d\n", qh, | |
2965 | status); | |
b92a78e5 RG |
2966 | } |
2967 | return status; | |
2968 | } | |
2969 | break; | |
2970 | } | |
2971 | done: | |
2972 | spin_unlock_irqrestore(&oxu->lock, flags); | |
2973 | return 0; | |
2974 | } | |
2975 | ||
2976 | /* Bulk qh holds the data toggle */ | |
2977 | static void oxu_endpoint_disable(struct usb_hcd *hcd, | |
2978 | struct usb_host_endpoint *ep) | |
2979 | { | |
2980 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
2981 | unsigned long flags; | |
2982 | struct ehci_qh *qh, *tmp; | |
2983 | ||
2984 | /* ASSERT: any requests/urbs are being unlinked */ | |
2985 | /* ASSERT: nobody can be submitting urbs for this any more */ | |
2986 | ||
2987 | rescan: | |
2988 | spin_lock_irqsave(&oxu->lock, flags); | |
2989 | qh = ep->hcpriv; | |
2990 | if (!qh) | |
2991 | goto done; | |
2992 | ||
2993 | /* endpoints can be iso streams. for now, we don't | |
2994 | * accelerate iso completions ... so spin a while. | |
2995 | */ | |
2996 | if (qh->hw_info1 == 0) { | |
2997 | oxu_vdbg(oxu, "iso delay\n"); | |
2998 | goto idle_timeout; | |
2999 | } | |
3000 | ||
3001 | if (!HC_IS_RUNNING(hcd->state)) | |
3002 | qh->qh_state = QH_STATE_IDLE; | |
3003 | switch (qh->qh_state) { | |
3004 | case QH_STATE_LINKED: | |
3005 | for (tmp = oxu->async->qh_next.qh; | |
3006 | tmp && tmp != qh; | |
3007 | tmp = tmp->qh_next.qh) | |
3008 | continue; | |
3009 | /* periodic qh self-unlinks on empty */ | |
3010 | if (!tmp) | |
3011 | goto nogood; | |
3012 | unlink_async(oxu, qh); | |
3013 | /* FALL THROUGH */ | |
3014 | case QH_STATE_UNLINK: /* wait for hw to finish? */ | |
3015 | idle_timeout: | |
3016 | spin_unlock_irqrestore(&oxu->lock, flags); | |
3017 | schedule_timeout_uninterruptible(1); | |
3018 | goto rescan; | |
3019 | case QH_STATE_IDLE: /* fully unlinked */ | |
3020 | if (list_empty(&qh->qtd_list)) { | |
3021 | qh_put(qh); | |
3022 | break; | |
3023 | } | |
8787971e | 3024 | /* fall through */ |
b92a78e5 RG |
3025 | default: |
3026 | nogood: | |
3027 | /* caller was supposed to have unlinked any requests; | |
3028 | * that's not our job. just leak this memory. | |
3029 | */ | |
3030 | oxu_err(oxu, "qh %p (#%02x) state %d%s\n", | |
3031 | qh, ep->desc.bEndpointAddress, qh->qh_state, | |
3032 | list_empty(&qh->qtd_list) ? "" : "(has tds)"); | |
3033 | break; | |
3034 | } | |
3035 | ep->hcpriv = NULL; | |
3036 | done: | |
3037 | spin_unlock_irqrestore(&oxu->lock, flags); | |
b92a78e5 RG |
3038 | } |
3039 | ||
3040 | static int oxu_get_frame(struct usb_hcd *hcd) | |
3041 | { | |
3042 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
3043 | ||
3044 | return (readl(&oxu->regs->frame_index) >> 3) % | |
3045 | oxu->periodic_size; | |
3046 | } | |
3047 | ||
3048 | /* Build "status change" packet (one or two bytes) from HC registers */ | |
3049 | static int oxu_hub_status_data(struct usb_hcd *hcd, char *buf) | |
3050 | { | |
3051 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
3052 | u32 temp, mask, status = 0; | |
3053 | int ports, i, retval = 1; | |
3054 | unsigned long flags; | |
3055 | ||
464ed18e | 3056 | /* if !PM, root hub timers won't get shut down ... */ |
b92a78e5 RG |
3057 | if (!HC_IS_RUNNING(hcd->state)) |
3058 | return 0; | |
3059 | ||
3060 | /* init status to no-changes */ | |
3061 | buf[0] = 0; | |
3062 | ports = HCS_N_PORTS(oxu->hcs_params); | |
3063 | if (ports > 7) { | |
3064 | buf[1] = 0; | |
3065 | retval++; | |
3066 | } | |
3067 | ||
3068 | /* Some boards (mostly VIA?) report bogus overcurrent indications, | |
3069 | * causing massive log spam unless we completely ignore them. It | |
b595076a | 3070 | * may be relevant that VIA VT8235 controllers, where PORT_POWER is |
b92a78e5 RG |
3071 | * always set, seem to clear PORT_OCC and PORT_CSC when writing to |
3072 | * PORT_POWER; that's surprising, but maybe within-spec. | |
3073 | */ | |
3074 | if (!ignore_oc) | |
3075 | mask = PORT_CSC | PORT_PEC | PORT_OCC; | |
3076 | else | |
3077 | mask = PORT_CSC | PORT_PEC; | |
3078 | ||
3079 | /* no hub change reports (bit 0) for now (power, ...) */ | |
3080 | ||
3081 | /* port N changes (bit N)? */ | |
3082 | spin_lock_irqsave(&oxu->lock, flags); | |
3083 | for (i = 0; i < ports; i++) { | |
3084 | temp = readl(&oxu->regs->port_status[i]); | |
3085 | ||
3086 | /* | |
3087 | * Return status information even for ports with OWNER set. | |
37ebb549 | 3088 | * Otherwise hub_wq wouldn't see the disconnect event when a |
b92a78e5 RG |
3089 | * high-speed device is switched over to the companion |
3090 | * controller by the user. | |
3091 | */ | |
3092 | ||
3093 | if (!(temp & PORT_CONNECT)) | |
3094 | oxu->reset_done[i] = 0; | |
3095 | if ((temp & mask) != 0 || ((temp & PORT_RESUME) != 0 && | |
3096 | time_after_eq(jiffies, oxu->reset_done[i]))) { | |
3097 | if (i < 7) | |
3098 | buf[0] |= 1 << (i + 1); | |
3099 | else | |
3100 | buf[1] |= 1 << (i - 7); | |
3101 | status = STS_PCD; | |
3102 | } | |
3103 | } | |
3104 | /* FIXME autosuspend idle root hubs */ | |
3105 | spin_unlock_irqrestore(&oxu->lock, flags); | |
3106 | return status ? retval : 0; | |
3107 | } | |
3108 | ||
3109 | /* Returns the speed of a device attached to a port on the root hub. */ | |
3110 | static inline unsigned int oxu_port_speed(struct oxu_hcd *oxu, | |
3111 | unsigned int portsc) | |
3112 | { | |
3113 | switch ((portsc >> 26) & 3) { | |
3114 | case 0: | |
3115 | return 0; | |
3116 | case 1: | |
288ead45 | 3117 | return USB_PORT_STAT_LOW_SPEED; |
b92a78e5 RG |
3118 | case 2: |
3119 | default: | |
288ead45 | 3120 | return USB_PORT_STAT_HIGH_SPEED; |
b92a78e5 RG |
3121 | } |
3122 | } | |
3123 | ||
3124 | #define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E) | |
3125 | static int oxu_hub_control(struct usb_hcd *hcd, u16 typeReq, | |
3126 | u16 wValue, u16 wIndex, char *buf, u16 wLength) | |
3127 | { | |
3128 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
3129 | int ports = HCS_N_PORTS(oxu->hcs_params); | |
3130 | u32 __iomem *status_reg = &oxu->regs->port_status[wIndex - 1]; | |
3131 | u32 temp, status; | |
3132 | unsigned long flags; | |
3133 | int retval = 0; | |
3134 | unsigned selector; | |
3135 | ||
3136 | /* | |
3137 | * FIXME: support SetPortFeatures USB_PORT_FEAT_INDICATOR. | |
3138 | * HCS_INDICATOR may say we can change LEDs to off/amber/green. | |
3139 | * (track current state ourselves) ... blink for diagnostics, | |
3140 | * power, "this is the one", etc. EHCI spec supports this. | |
3141 | */ | |
3142 | ||
3143 | spin_lock_irqsave(&oxu->lock, flags); | |
3144 | switch (typeReq) { | |
3145 | case ClearHubFeature: | |
3146 | switch (wValue) { | |
3147 | case C_HUB_LOCAL_POWER: | |
3148 | case C_HUB_OVER_CURRENT: | |
3149 | /* no hub-wide feature/status flags */ | |
3150 | break; | |
3151 | default: | |
3152 | goto error; | |
3153 | } | |
3154 | break; | |
3155 | case ClearPortFeature: | |
3156 | if (!wIndex || wIndex > ports) | |
3157 | goto error; | |
3158 | wIndex--; | |
3159 | temp = readl(status_reg); | |
3160 | ||
3161 | /* | |
3162 | * Even if OWNER is set, so the port is owned by the | |
37ebb549 | 3163 | * companion controller, hub_wq needs to be able to clear |
b92a78e5 | 3164 | * the port-change status bits (especially |
749da5f8 | 3165 | * USB_PORT_STAT_C_CONNECTION). |
b92a78e5 RG |
3166 | */ |
3167 | ||
3168 | switch (wValue) { | |
3169 | case USB_PORT_FEAT_ENABLE: | |
3170 | writel(temp & ~PORT_PE, status_reg); | |
3171 | break; | |
3172 | case USB_PORT_FEAT_C_ENABLE: | |
3173 | writel((temp & ~PORT_RWC_BITS) | PORT_PEC, status_reg); | |
3174 | break; | |
3175 | case USB_PORT_FEAT_SUSPEND: | |
3176 | if (temp & PORT_RESET) | |
3177 | goto error; | |
3178 | if (temp & PORT_SUSPEND) { | |
3179 | if ((temp & PORT_PE) == 0) | |
3180 | goto error; | |
3181 | /* resume signaling for 20 msec */ | |
3182 | temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS); | |
3183 | writel(temp | PORT_RESUME, status_reg); | |
3184 | oxu->reset_done[wIndex] = jiffies | |
3185 | + msecs_to_jiffies(20); | |
3186 | } | |
3187 | break; | |
3188 | case USB_PORT_FEAT_C_SUSPEND: | |
3189 | /* we auto-clear this feature */ | |
3190 | break; | |
3191 | case USB_PORT_FEAT_POWER: | |
3192 | if (HCS_PPC(oxu->hcs_params)) | |
3193 | writel(temp & ~(PORT_RWC_BITS | PORT_POWER), | |
3194 | status_reg); | |
3195 | break; | |
3196 | case USB_PORT_FEAT_C_CONNECTION: | |
3197 | writel((temp & ~PORT_RWC_BITS) | PORT_CSC, status_reg); | |
3198 | break; | |
3199 | case USB_PORT_FEAT_C_OVER_CURRENT: | |
3200 | writel((temp & ~PORT_RWC_BITS) | PORT_OCC, status_reg); | |
3201 | break; | |
3202 | case USB_PORT_FEAT_C_RESET: | |
3203 | /* GetPortStatus clears reset */ | |
3204 | break; | |
3205 | default: | |
3206 | goto error; | |
3207 | } | |
3208 | readl(&oxu->regs->command); /* unblock posted write */ | |
3209 | break; | |
3210 | case GetHubDescriptor: | |
3211 | ehci_hub_descriptor(oxu, (struct usb_hub_descriptor *) | |
3212 | buf); | |
3213 | break; | |
3214 | case GetHubStatus: | |
3215 | /* no hub-wide feature/status flags */ | |
3216 | memset(buf, 0, 4); | |
3217 | break; | |
3218 | case GetPortStatus: | |
3219 | if (!wIndex || wIndex > ports) | |
3220 | goto error; | |
3221 | wIndex--; | |
3222 | status = 0; | |
3223 | temp = readl(status_reg); | |
3224 | ||
3225 | /* wPortChange bits */ | |
3226 | if (temp & PORT_CSC) | |
749da5f8 | 3227 | status |= USB_PORT_STAT_C_CONNECTION << 16; |
b92a78e5 | 3228 | if (temp & PORT_PEC) |
749da5f8 | 3229 | status |= USB_PORT_STAT_C_ENABLE << 16; |
b92a78e5 | 3230 | if ((temp & PORT_OCC) && !ignore_oc) |
749da5f8 | 3231 | status |= USB_PORT_STAT_C_OVERCURRENT << 16; |
b92a78e5 RG |
3232 | |
3233 | /* whoever resumes must GetPortStatus to complete it!! */ | |
3234 | if (temp & PORT_RESUME) { | |
3235 | ||
3236 | /* Remote Wakeup received? */ | |
3237 | if (!oxu->reset_done[wIndex]) { | |
3238 | /* resume signaling for 20 msec */ | |
3239 | oxu->reset_done[wIndex] = jiffies | |
3240 | + msecs_to_jiffies(20); | |
3241 | /* check the port again */ | |
3242 | mod_timer(&oxu_to_hcd(oxu)->rh_timer, | |
3243 | oxu->reset_done[wIndex]); | |
3244 | } | |
3245 | ||
3246 | /* resume completed? */ | |
3247 | else if (time_after_eq(jiffies, | |
3248 | oxu->reset_done[wIndex])) { | |
749da5f8 | 3249 | status |= USB_PORT_STAT_C_SUSPEND << 16; |
b92a78e5 RG |
3250 | oxu->reset_done[wIndex] = 0; |
3251 | ||
3252 | /* stop resume signaling */ | |
3253 | temp = readl(status_reg); | |
3254 | writel(temp & ~(PORT_RWC_BITS | PORT_RESUME), | |
3255 | status_reg); | |
3256 | retval = handshake(oxu, status_reg, | |
3257 | PORT_RESUME, 0, 2000 /* 2msec */); | |
3258 | if (retval != 0) { | |
3259 | oxu_err(oxu, | |
3260 | "port %d resume error %d\n", | |
3261 | wIndex + 1, retval); | |
3262 | goto error; | |
3263 | } | |
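/* clear suspend/resume and the PORTSC line-status field (bits 11:10) */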
3264 | temp &= ~(PORT_SUSPEND|PORT_RESUME|(3<<10)); | |
3265 | } | |
3266 | } | |
3267 | ||
3268 | /* whoever resets must GetPortStatus to complete it!! */ | |
3269 | if ((temp & PORT_RESET) | |
3270 | && time_after_eq(jiffies, | |
3271 | oxu->reset_done[wIndex])) { | |
749da5f8 | 3272 | status |= USB_PORT_STAT_C_RESET << 16; |
b92a78e5 RG |
3273 | oxu->reset_done[wIndex] = 0; |
3274 | ||
3275 | /* force reset to complete */ | |
3276 | writel(temp & ~(PORT_RWC_BITS | PORT_RESET), | |
3277 | status_reg); | |
3278 | /* REVISIT: some hardware needs 550+ usec to clear | |
3279 | * this bit; seems too long to spin routinely... | |
3280 | */ | |
3281 | retval = handshake(oxu, status_reg, | |
3282 | PORT_RESET, 0, 750); | |
3283 | if (retval != 0) { | |
3284 | oxu_err(oxu, "port %d reset error %d\n", | |
3285 | wIndex + 1, retval); | |
3286 | goto error; | |
3287 | } | |
3288 | ||
3289 | /* see what we found out */ | |
3290 | temp = check_reset_complete(oxu, wIndex, status_reg, | |
3291 | readl(status_reg)); | |
3292 | } | |
3293 | ||
3294 | /* transfer dedicated ports to the companion hc */ | |
3295 | if ((temp & PORT_CONNECT) && | |
3296 | test_bit(wIndex, &oxu->companion_ports)) { | |
3297 | temp &= ~PORT_RWC_BITS; | |
3298 | temp |= PORT_OWNER; | |
3299 | writel(temp, status_reg); | |
3300 | oxu_dbg(oxu, "port %d --> companion\n", wIndex + 1); | |
3301 | temp = readl(status_reg); | |
3302 | } | |
3303 | ||
3304 | /* | |
37ebb549 | 3305 | * Even if OWNER is set, there's no harm letting hub_wq |
b92a78e5 RG |
3306 | * see the wPortStatus values (they should all be 0 except |
3307 | * for PORT_POWER anyway). | |
3308 | */ | |
3309 | ||
3310 | if (temp & PORT_CONNECT) { | |
749da5f8 | 3311 | status |= USB_PORT_STAT_CONNECTION; |
b92a78e5 RG |
3312 | /* status may be from integrated TT */ |
3313 | status |= oxu_port_speed(oxu, temp); | |
3314 | } | |
3315 | if (temp & PORT_PE) | |
749da5f8 | 3316 | status |= USB_PORT_STAT_ENABLE; |
b92a78e5 | 3317 | if (temp & (PORT_SUSPEND|PORT_RESUME)) |
749da5f8 | 3318 | status |= USB_PORT_STAT_SUSPEND; |
b92a78e5 | 3319 | if (temp & PORT_OC) |
749da5f8 | 3320 | status |= USB_PORT_STAT_OVERCURRENT; |
b92a78e5 | 3321 | if (temp & PORT_RESET) |
749da5f8 | 3322 | status |= USB_PORT_STAT_RESET; |
b92a78e5 | 3323 | if (temp & PORT_POWER) |
749da5f8 | 3324 | status |= USB_PORT_STAT_POWER; |
b92a78e5 RG |
3325 | |
3326 | #ifndef OXU_VERBOSE_DEBUG | |
3327 | if (status & ~0xffff) /* only if wPortChange is interesting */ | |
3328 | #endif | |
3329 | dbg_port(oxu, "GetStatus", wIndex + 1, temp); | |
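/* wPortStatus goes in the low 16 bits, wPortChange in the high 16, little-endian */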
3330 | put_unaligned(cpu_to_le32(status), (__le32 *) buf); | |
3331 | break; | |
3332 | case SetHubFeature: | |
3333 | switch (wValue) { | |
3334 | case C_HUB_LOCAL_POWER: | |
3335 | case C_HUB_OVER_CURRENT: | |
3336 | /* no hub-wide feature/status flags */ | |
3337 | break; | |
3338 | default: | |
3339 | goto error; | |
3340 | } | |
3341 | break; | |
3342 | case SetPortFeature: | |
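/* the test-mode selector arrives in the high byte of wIndex (USB 2.0 hub spec) */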
3343 | selector = wIndex >> 8; | |
3344 | wIndex &= 0xff; | |
3345 | if (!wIndex || wIndex > ports) | |
3346 | goto error; | |
3347 | wIndex--; | |
3348 | temp = readl(status_reg); | |
3349 | if (temp & PORT_OWNER) | |
3350 | break; | |
3351 | ||
3352 | temp &= ~PORT_RWC_BITS; | |
3353 | switch (wValue) { | |
3354 | case USB_PORT_FEAT_SUSPEND: | |
3355 | if ((temp & PORT_PE) == 0 | |
3356 | || (temp & PORT_RESET) != 0) | |
3357 | goto error; | |
3358 | if (device_may_wakeup(&hcd->self.root_hub->dev)) | |
3359 | temp |= PORT_WAKE_BITS; | |
3360 | writel(temp | PORT_SUSPEND, status_reg); | |
3361 | break; | |
3362 | case USB_PORT_FEAT_POWER: | |
3363 | if (HCS_PPC(oxu->hcs_params)) | |
3364 | writel(temp | PORT_POWER, status_reg); | |
3365 | break; | |
3366 | case USB_PORT_FEAT_RESET: | |
3367 | if (temp & PORT_RESUME) | |
3368 | goto error; | |
3369 | /* line status bits may report this as low speed, | |
3370 | * which can be fine if this root hub has a | |
3371 | * transaction translator built in. | |
3372 | */ | |
3373 | oxu_vdbg(oxu, "port %d reset\n", wIndex + 1); | |
3374 | temp |= PORT_RESET; | |
3375 | temp &= ~PORT_PE; | |
3376 | ||
3377 | /* | |
3378 | * caller must wait, then call GetPortStatus; | |
3379 | * the USB 2.0 spec says root-port resets last 50 ms | |
3380 | */ | |
3381 | oxu->reset_done[wIndex] = jiffies | |
3382 | + msecs_to_jiffies(50); | |
3383 | writel(temp, status_reg); | |
3384 | break; | |
3385 | ||
3386 | /* For downstream-facing ports (which these are): one hub port is | |
3387 | * put into test mode according to USB 2.0 section 11.24.2.13, then | |
3388 | * the hub must be reset (which for a root hub now means | |
3389 | * rmmod+modprobe, or else a system reboot). See EHCI 2.3.9 and | |
3390 | * 4.14 for info about the EHCI-specific stuff. | |
3391 | */ | |
3392 | case USB_PORT_FEAT_TEST: | |
3393 | if (!selector || selector > 5) | |
3394 | goto error; | |
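/* quiesce and halt the controller, then program the selector into the PORTSC test-control field (bits 19:16) */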
3395 | ehci_quiesce(oxu); | |
3396 | ehci_halt(oxu); | |
3397 | temp |= selector << 16; | |
3398 | writel(temp, status_reg); | |
3399 | break; | |
3400 | ||
3401 | default: | |
3402 | goto error; | |
3403 | } | |
3404 | readl(&oxu->regs->command); /* unblock posted writes */ | |
3405 | break; | |
3406 | ||
3407 | default: | |
3408 | error: | |
3409 | /* "stall" on error */ | |
3410 | retval = -EPIPE; | |
3411 | } | |
3412 | spin_unlock_irqrestore(&oxu->lock, flags); | |
3413 | return retval; | |
3414 | } | |
3415 | ||
3416 | #ifdef CONFIG_PM | |
3417 | ||
3418 | static int oxu_bus_suspend(struct usb_hcd *hcd) | |
3419 | { | |
3420 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
3421 | int port; | |
3422 | int mask; | |
3423 | ||
3424 | oxu_dbg(oxu, "suspend root hub\n"); | |
3425 | ||
3426 | if (time_before(jiffies, oxu->next_statechange)) | |
3427 | msleep(5); | |
3428 | ||
3429 | port = HCS_N_PORTS(oxu->hcs_params); | |
3430 | spin_lock_irq(&oxu->lock); | |
3431 | ||
3432 | /* stop schedules, clean any completed work */ | |
3433 | if (HC_IS_RUNNING(hcd->state)) { | |
3434 | ehci_quiesce(oxu); | |
3435 | hcd->state = HC_STATE_QUIESCING; | |
3436 | } | |
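/* save CMD so bus_resume() can restore run state, framelist size and irq threshold */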
3437 | oxu->command = readl(&oxu->regs->command); | |
3438 | if (oxu->reclaim) | |
3439 | oxu->reclaim_ready = 1; | |
3440 | ehci_work(oxu); | |
3441 | ||
3442 | /* Unlike other USB host controller types, EHCI doesn't have | |
3443 | * any notion of "global" or bus-wide suspend. The driver has | |
3444 | * to manually suspend all the active unsuspended ports, and | |
3445 | * then manually resume them in the bus_resume() routine. | |
3446 | */ | |
3447 | oxu->bus_suspended = 0; | |
3448 | while (port--) { | |
3449 | u32 __iomem *reg = &oxu->regs->port_status[port]; | |
3450 | u32 t1 = readl(reg) & ~PORT_RWC_BITS; | |
3451 | u32 t2 = t1; | |
3452 | ||
3453 | /* keep track of which ports we suspend */ | |
3454 | if ((t1 & PORT_PE) && !(t1 & PORT_OWNER) && | |
3455 | !(t1 & PORT_SUSPEND)) { | |
3456 | t2 |= PORT_SUSPEND; | |
3457 | set_bit(port, &oxu->bus_suspended); | |
3458 | } | |
3459 | ||
3460 | /* enable remote wakeup on all ports */ | |
3461 | if (device_may_wakeup(&hcd->self.root_hub->dev)) | |
3462 | t2 |= PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E; | |
3463 | else | |
3464 | t2 &= ~(PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E); | |
3465 | ||
3466 | if (t1 != t2) { | |
3467 | oxu_vdbg(oxu, "port %d, %08x -> %08x\n", | |
3468 | port + 1, t1, t2); | |
3469 | writel(t2, reg); | |
3470 | } | |
3471 | } | |
3472 | ||
3473 | /* turn off now-idle HC */ | |
3474 | del_timer_sync(&oxu->watchdog); | |
3475 | ehci_halt(oxu); | |
3476 | hcd->state = HC_STATE_SUSPENDED; | |
3477 | ||
3478 | /* allow remote wakeup */ | |
3479 | mask = INTR_MASK; | |
3480 | if (!device_may_wakeup(&hcd->self.root_hub->dev)) | |
3481 | mask &= ~STS_PCD; | |
3482 | writel(mask, &oxu->regs->intr_enable); | |
3483 | readl(&oxu->regs->intr_enable); | |
3484 | ||
3485 | oxu->next_statechange = jiffies + msecs_to_jiffies(10); | |
3486 | spin_unlock_irq(&oxu->lock); | |
3487 | return 0; | |
3488 | } | |
3489 | ||
3490 | /* Caller has locked the root hub, and should reset/reinit on error */ | |
3491 | static int oxu_bus_resume(struct usb_hcd *hcd) | |
3492 | { | |
3493 | struct oxu_hcd *oxu = hcd_to_oxu(hcd); | |
3494 | u32 temp; | |
3495 | int i; | |
3496 | ||
3497 | if (time_before(jiffies, oxu->next_statechange)) | |
3498 | msleep(5); | |
3499 | spin_lock_irq(&oxu->lock); | |
3500 | ||
3501 | /* Ideally we've got a real resume here, and no port's power | |
3502 | * was lost. (For PCI, that means Vaux was maintained.) But we | |
3503 | * could instead be restoring a swsusp snapshot -- so that BIOS was | |
3504 | * the last user of the controller, not reset/pm hardware keeping | |
3505 | * state we gave to it. | |
3506 | */ | |
3507 | temp = readl(&oxu->regs->intr_enable); | |
3508 | oxu_dbg(oxu, "resume root hub%s\n", temp ? "" : " after power loss"); | |
3509 | ||
3510 | /* at least some APM implementations will try to deliver | |
3511 | * IRQs right away, so delay them until we're ready. | |
3512 | */ | |
3513 | writel(0, &oxu->regs->intr_enable); | |
3514 | ||
3515 | /* re-init operational registers */ | |
3516 | writel(0, &oxu->regs->segment); | |
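/* a zero segment register keeps all DMA data structures in the low 4 GB (EHCI CTRLDSSEGMENT) */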
3517 | writel(oxu->periodic_dma, &oxu->regs->frame_list); | |
3518 | writel((u32) oxu->async->qh_dma, &oxu->regs->async_next); | |
3519 | ||
3520 | /* restore CMD_RUN, framelist size, and irq threshold */ | |
3521 | writel(oxu->command, &oxu->regs->command); | |
3522 | ||
3523 | /* Some controller/firmware combinations need a delay during which | |
3524 | * they set up the port statuses. See Bugzilla #8190. */ | |
3525 | mdelay(8); | |
3526 | ||
3527 | /* manually resume the ports we suspended during bus_suspend() */ | |
3528 | i = HCS_N_PORTS(oxu->hcs_params); | |
3529 | while (i--) { | |
3530 | temp = readl(&oxu->regs->port_status[i]); | |
3531 | temp &= ~(PORT_RWC_BITS | |
3532 | | PORT_WKOC_E | PORT_WKDISC_E | PORT_WKCONN_E); | |
3533 | if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) { | |
3534 | oxu->reset_done[i] = jiffies + msecs_to_jiffies(20); | |
3535 | temp |= PORT_RESUME; | |
3536 | } | |
3537 | writel(temp, &oxu->regs->port_status[i]); | |
3538 | } | |
3539 | i = HCS_N_PORTS(oxu->hcs_params); | |
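/* let resume signaling run for the required 20 ms before stopping it below */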
3540 | mdelay(20); | |
3541 | while (i--) { | |
3542 | temp = readl(&oxu->regs->port_status[i]); | |
3543 | if (test_bit(i, &oxu->bus_suspended) && (temp & PORT_SUSPEND)) { | |
3544 | temp &= ~(PORT_RWC_BITS | PORT_RESUME); | |
3545 | writel(temp, &oxu->regs->port_status[i]); | |
3546 | oxu_vdbg(oxu, "resumed port %d\n", i + 1); | |
3547 | } | |
3548 | } | |
3549 | (void) readl(&oxu->regs->command); | |
3550 | ||
3551 | /* maybe re-activate the schedule(s) */ | |
3552 | temp = 0; | |
3553 | if (oxu->async->qh_next.qh) | |
3554 | temp |= CMD_ASE; | |
3555 | if (oxu->periodic_sched) | |
3556 | temp |= CMD_PSE; | |
3557 | if (temp) { | |
3558 | oxu->command |= temp; | |
3559 | writel(oxu->command, &oxu->regs->command); | |
3560 | } | |
3561 | ||
3562 | oxu->next_statechange = jiffies + msecs_to_jiffies(5); | |
3563 | hcd->state = HC_STATE_RUNNING; | |
3564 | ||
3565 | /* Now we can safely re-enable irqs */ | |
3566 | writel(INTR_MASK, &oxu->regs->intr_enable); | |
3567 | ||
3568 | spin_unlock_irq(&oxu->lock); | |
3569 | return 0; | |
3570 | } | |
3571 | ||
3572 | #else | |
3573 | ||
3574 | static int oxu_bus_suspend(struct usb_hcd *hcd) | |
3575 | { | |
3576 | return 0; | |
3577 | } | |
3578 | ||
3579 | static int oxu_bus_resume(struct usb_hcd *hcd) | |
3580 | { | |
3581 | return 0; | |
3582 | } | |
3583 | ||
3584 | #endif /* CONFIG_PM */ | |
3585 | ||
3586 | static const struct hc_driver oxu_hc_driver = { | |
3587 | .description = "oxu210hp_hcd", | |
3588 | .product_desc = "oxu210hp HCD", | |
3589 | .hcd_priv_size = sizeof(struct oxu_hcd), | |
3590 | ||
3591 | /* | |
3592 | * Generic hardware linkage | |
3593 | */ | |
3594 | .irq = oxu_irq, | |
3595 | .flags = HCD_MEMORY | HCD_USB2, | |
3596 | ||
3597 | /* | |
3598 | * Basic lifecycle operations | |
3599 | */ | |
3600 | .reset = oxu_reset, | |
3601 | .start = oxu_run, | |
3602 | .stop = oxu_stop, | |
3603 | .shutdown = oxu_shutdown, | |
3604 | ||
3605 | /* | |
3606 | * Managing i/o requests and associated device resources | |
3607 | */ | |
3608 | .urb_enqueue = oxu_urb_enqueue, | |
3609 | .urb_dequeue = oxu_urb_dequeue, | |
3610 | .endpoint_disable = oxu_endpoint_disable, | |
3611 | ||
3612 | /* | |
3613 | * Scheduling support | |
3614 | */ | |
3615 | .get_frame_number = oxu_get_frame, | |
3616 | ||
3617 | /* | |
3618 | * Root hub support | |
3619 | */ | |
3620 | .hub_status_data = oxu_hub_status_data, | |
3621 | .hub_control = oxu_hub_control, | |
3622 | .bus_suspend = oxu_bus_suspend, | |
3623 | .bus_resume = oxu_bus_resume, | |
3624 | }; | |
3625 | ||
3626 | /* | |
3627 | * Module stuff | |
3628 | */ | |
3629 | ||
3630 | static void oxu_configuration(struct platform_device *pdev, void *base) | |
3631 | { | |
3632 | u32 tmp; | |
3633 | ||
3634 | /* Initialize top-level registers. | |
3635 | * This is the first write ever issued to the chip. | |
3636 | */ | |
3637 | oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D); | |
3638 | oxu_writel(base, OXU_SOFTRESET, OXU_SRESET); | |
3639 | oxu_writel(base, OXU_HOSTIFCONFIG, 0x0000037D); | |
3640 | ||
3641 | tmp = oxu_readl(base, OXU_PIOBURSTREADCTRL); | |
3642 | oxu_writel(base, OXU_PIOBURSTREADCTRL, tmp | 0x0040); | |
3643 | ||
3644 | oxu_writel(base, OXU_ASO, OXU_SPHPOEN | OXU_OVRCCURPUPDEN | | |
3645 | OXU_COMPARATOR | OXU_ASO_OP); | |
3646 | ||
3647 | tmp = oxu_readl(base, OXU_CLKCTRL_SET); | |
3648 | oxu_writel(base, OXU_CLKCTRL_SET, tmp | OXU_SYSCLKEN | OXU_USBOTGCLKEN); | |
3649 | ||
3650 | /* Clear all top-level interrupt enable bits */ | |
3651 | oxu_writel(base, OXU_CHIPIRQEN_CLR, 0xff); | |
3652 | ||
3653 | /* Clear all top-level interrupt status bits */ | |
3654 | oxu_writel(base, OXU_CHIPIRQSTATUS, 0xff); | |
3655 | ||
3656 | /* Enable all needed top-level interrupts except the OTG and SPH core interrupts */ | |
3657 | oxu_writel(base, OXU_CHIPIRQEN_SET, OXU_USBSPHLPWUI | OXU_USBOTGLPWUI); | |
3658 | } | |
3659 | ||
3660 | static int oxu_verify_id(struct platform_device *pdev, void *base) | |
3661 | { | |
3662 | u32 id; | |
82cef0b8 | 3663 | static const char * const bo[] = { |
b92a78e5 RG |
3664 | "reserved", |
3665 | "128-pin LQFP", | |
3666 | "84-pin TFBGA", | |
3667 | "reserved", | |
3668 | }; | |
3669 | ||
3670 | /* Read controller signature register to find a match */ | |
3671 | id = oxu_readl(base, OXU_DEVICEID); | |
3672 | dev_info(&pdev->dev, "device ID %x\n", id); | |
3673 | if ((id & OXU_REV_MASK) != (OXU_REV_2100 << OXU_REV_SHIFT)) | |
3674 | return -1; | |
3675 | ||
3676 | dev_info(&pdev->dev, "found device %x %s (%04x:%04x)\n", | |
3677 | id >> OXU_REV_SHIFT, | |
3678 | bo[(id & OXU_BO_MASK) >> OXU_BO_SHIFT], | |
3679 | (id & OXU_MAJ_REV_MASK) >> OXU_MAJ_REV_SHIFT, | |
3680 | (id & OXU_MIN_REV_MASK) >> OXU_MIN_REV_SHIFT); | |
3681 | ||
3682 | return 0; | |
3683 | } | |
3684 | ||
3685 | static const struct hc_driver oxu_hc_driver; | |
3686 | static struct usb_hcd *oxu_create(struct platform_device *pdev, | |
3687 | unsigned long memstart, unsigned long memlen, | |
3688 | void *base, int irq, int otg) | |
3689 | { | |
3690 | struct device *dev = &pdev->dev; | |
3691 | ||
3692 | struct usb_hcd *hcd; | |
3693 | struct oxu_hcd *oxu; | |
3694 | int ret; | |
3695 | ||
3696 | /* Set endian mode and host mode */ | |
3697 | oxu_writel(base + (otg ? OXU_OTG_CORE_OFFSET : OXU_SPH_CORE_OFFSET), | |
3698 | OXU_USBMODE, | |
3699 | OXU_CM_HOST_ONLY | OXU_ES_LITTLE | OXU_VBPS); | |
3700 | ||
3701 | hcd = usb_create_hcd(&oxu_hc_driver, dev, | |
3702 | otg ? "oxu210hp_otg" : "oxu210hp_sph"); | |
3703 | if (!hcd) | |
3704 | return ERR_PTR(-ENOMEM); | |
3705 | ||
3706 | hcd->rsrc_start = memstart; | |
3707 | hcd->rsrc_len = memlen; | |
3708 | hcd->regs = base; | |
3709 | hcd->irq = irq; | |
3710 | hcd->state = HC_STATE_HALT; | |
3711 | ||
3712 | oxu = hcd_to_oxu(hcd); | |
3713 | oxu->is_otg = otg; | |
3714 | ||
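/* usb_add_hcd() resets and starts the controller through the hc_driver callbacks and registers the root hub */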
3715 | ret = usb_add_hcd(hcd, irq, IRQF_SHARED); | |
3716 | if (ret < 0) | |
3717 | return ERR_PTR(ret); | |
3718 | ||
3c9740a1 | 3719 | device_wakeup_enable(hcd->self.controller); |
b92a78e5 RG |
3720 | return hcd; |
3721 | } | |
3722 | ||
3723 | static int oxu_init(struct platform_device *pdev, | |
3724 | unsigned long memstart, unsigned long memlen, | |
3725 | void *base, int irq) | |
3726 | { | |
3727 | struct oxu_info *info = platform_get_drvdata(pdev); | |
3728 | struct usb_hcd *hcd; | |
3729 | int ret; | |
3730 | ||
3731 | /* First-time configuration at startup */ | |
3732 | oxu_configuration(pdev, base); | |
3733 | ||
3734 | ret = oxu_verify_id(pdev, base); | |
3735 | if (ret) { | |
3736 | dev_err(&pdev->dev, "no devices found!\n"); | |
3737 | return -ENODEV; | |
3738 | } | |
3739 | ||
3740 | /* Create the OTG controller */ | |
3741 | hcd = oxu_create(pdev, memstart, memlen, base, irq, 1); | |
3742 | if (IS_ERR(hcd)) { | |
3743 | dev_err(&pdev->dev, "cannot create OTG controller!\n"); | |
3744 | ret = PTR_ERR(hcd); | |
3745 | goto error_create_otg; | |
3746 | } | |
3747 | info->hcd[0] = hcd; | |
3748 | ||
3749 | /* Create the SPH host controller */ | |
3750 | hcd = oxu_create(pdev, memstart, memlen, base, irq, 0); | |
3751 | if (IS_ERR(hcd)) { | |
3752 | dev_err(&pdev->dev, "cannot create SPH controller!\n"); | |
3753 | ret = PTR_ERR(hcd); | |
3754 | goto error_create_sph; | |
3755 | } | |
3756 | info->hcd[1] = hcd; | |
3757 | ||
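/* finally enable the two low chip-level IRQ bits (assumption: these gate the OTG and SPH host-core interrupts) */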
3758 | oxu_writel(base, OXU_CHIPIRQEN_SET, | |
3759 | oxu_readl(base, OXU_CHIPIRQEN_SET) | 3); | |
3760 | ||
3761 | return 0; | |
3762 | ||
3763 | error_create_sph: | |
3764 | usb_remove_hcd(info->hcd[0]); | |
3765 | usb_put_hcd(info->hcd[0]); | |
3766 | ||
3767 | error_create_otg: | |
3768 | return ret; | |
3769 | } | |
3770 | ||
3771 | static int oxu_drv_probe(struct platform_device *pdev) | |
3772 | { | |
3773 | struct resource *res; | |
3774 | void *base; | |
3775 | unsigned long memstart, memlen; | |
3776 | int irq, ret; | |
3777 | struct oxu_info *info; | |
3778 | ||
3779 | if (usb_disabled()) | |
3780 | return -ENODEV; | |
3781 | ||
3782 | /* | |
3783 | * Get the platform resources | |
3784 | */ | |
3785 | res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); | |
3786 | if (!res) { | |
3787 | dev_err(&pdev->dev, | |
74c71ebd | 3788 | "no IRQ! Check %s setup!\n", dev_name(&pdev->dev)); |
b92a78e5 RG |
3789 | return -ENODEV; |
3790 | } | |
3791 | irq = res->start; | |
3792 | dev_dbg(&pdev->dev, "IRQ resource %d\n", irq); | |
3793 | ||
3794 | res = platform_get_resource(pdev, IORESOURCE_MEM, 0); | |
8968614a HS |
3795 | base = devm_ioremap_resource(&pdev->dev, res); |
3796 | if (IS_ERR(base)) { | |
3797 | ret = PTR_ERR(base); | |
3798 | goto error; | |
b92a78e5 RG |
3799 | } |
3800 | memstart = res->start; | |
28f65c11 | 3801 | memlen = resource_size(res); |
b92a78e5 | 3802 | |
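/* treat the IRQ line as falling-edge triggered (assumption: that is how the OXU210HP asserts its interrupt on this board) */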
dced35ae | 3803 | ret = irq_set_irq_type(irq, IRQF_TRIGGER_FALLING); |
b92a78e5 RG |
3804 | if (ret) { |
3805 | dev_err(&pdev->dev, "error setting irq type\n"); | |
3806 | ret = -EFAULT; | |
8968614a | 3807 | goto error; |
b92a78e5 RG |
3808 | } |
3809 | ||
3810 | /* Allocate a driver data struct to hold useful info for both | |
3811 | * SPH & OTG devices | |
3812 | */ | |
8968614a | 3813 | info = devm_kzalloc(&pdev->dev, sizeof(struct oxu_info), GFP_KERNEL); |
b92a78e5 | 3814 | if (!info) { |
b92a78e5 | 3815 | ret = -ENOMEM; |
8968614a | 3816 | goto error; |
b92a78e5 RG |
3817 | } |
3818 | platform_set_drvdata(pdev, info); | |
3819 | ||
3820 | ret = oxu_init(pdev, memstart, memlen, base, irq); | |
3821 | if (ret < 0) { | |
3822 | dev_dbg(&pdev->dev, "cannot init USB devices\n"); | |
8968614a | 3823 | goto error; |
b92a78e5 RG |
3824 | } |
3825 | ||
3826 | dev_info(&pdev->dev, "devices enabled and running\n"); | |
3827 | platform_set_drvdata(pdev, info); | |
3828 | ||
3829 | return 0; | |
3830 | ||
8968614a | 3831 | error: |
74c71ebd | 3832 | dev_err(&pdev->dev, "init %s fail, %d\n", dev_name(&pdev->dev), ret); |
b92a78e5 RG |
3833 | return ret; |
3834 | } | |
3835 | ||
3836 | static void oxu_remove(struct platform_device *pdev, struct usb_hcd *hcd) | |
3837 | { | |
3838 | usb_remove_hcd(hcd); | |
3839 | usb_put_hcd(hcd); | |
3840 | } | |
3841 | ||
3842 | static int oxu_drv_remove(struct platform_device *pdev) | |
3843 | { | |
3844 | struct oxu_info *info = platform_get_drvdata(pdev); | |
b92a78e5 RG |
3845 | |
3846 | oxu_remove(pdev, info->hcd[0]); | |
3847 | oxu_remove(pdev, info->hcd[1]); | |
3848 | ||
b92a78e5 RG |
3849 | return 0; |
3850 | } | |
3851 | ||
3852 | static void oxu_drv_shutdown(struct platform_device *pdev) | |
3853 | { | |
3854 | oxu_drv_remove(pdev); | |
3855 | } | |
3856 | ||
3857 | #if 0 | |
3858 | /* FIXME: TODO */ | |
3859 | static int oxu_drv_suspend(struct device *dev) | |
3860 | { | |
3861 | struct platform_device *pdev = to_platform_device(dev); | |
3862 | struct usb_hcd *hcd = dev_get_drvdata(dev); | |
3863 | ||
3864 | return 0; | |
3865 | } | |
3866 | ||
3867 | static int oxu_drv_resume(struct device *dev) | |
3868 | { | |
3869 | struct platform_device *pdev = to_platform_device(dev); | |
3870 | struct usb_hcd *hcd = dev_get_drvdata(dev); | |
3871 | ||
3872 | return 0; | |
3873 | } | |
3874 | #else | |
3875 | #define oxu_drv_suspend NULL | |
3876 | #define oxu_drv_resume NULL | |
3877 | #endif | |
3878 | ||
3879 | static struct platform_driver oxu_driver = { | |
3880 | .probe = oxu_drv_probe, | |
3881 | .remove = oxu_drv_remove, | |
3882 | .shutdown = oxu_drv_shutdown, | |
3883 | .suspend = oxu_drv_suspend, | |
3884 | .resume = oxu_drv_resume, | |
3885 | .driver = { | |
3886 | .name = "oxu210hp-hcd", | |
3887 | .bus = &platform_bus_type | |
3888 | } | |
3889 | }; | |
3890 | ||
cc27c96c | 3891 | module_platform_driver(oxu_driver); |
b92a78e5 RG |
3892 | |
3893 | MODULE_DESCRIPTION("Oxford OXU210HP HCD driver - ver. " DRIVER_VERSION); | |
3894 | MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>"); | |
3895 | MODULE_LICENSE("GPL"); |