Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Copyright (c) 2001-2004 by David Brownell | |
3 | * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers | |
4 | * | |
5 | * This program is free software; you can redistribute it and/or modify it | |
6 | * under the terms of the GNU General Public License as published by the | |
7 | * Free Software Foundation; either version 2 of the License, or (at your | |
8 | * option) any later version. | |
9 | * | |
10 | * This program is distributed in the hope that it will be useful, but | |
11 | * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | |
12 | * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
13 | * for more details. | |
14 | * | |
15 | * You should have received a copy of the GNU General Public License | |
16 | * along with this program; if not, write to the Free Software Foundation, | |
17 | * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | |
18 | */ | |
19 | ||
20 | /* this file is part of ehci-hcd.c */ | |
21 | ||
22 | /*-------------------------------------------------------------------------*/ | |
23 | ||
24 | /* | |
25 | * EHCI scheduled transaction support: interrupt, iso, split iso | |
26 | * These are called "periodic" transactions in the EHCI spec. | |
27 | * | |
28 | * Note that for interrupt transfers, the QH/QTD manipulation is shared | |
29 | * with the "asynchronous" transaction support (control/bulk transfers). | |
30 | * The only real difference is in how interrupt transfers are scheduled. | |
31 | * | |
32 | * For ISO, we make an "iso_stream" head to serve the same role as a QH. | |
33 | * It keeps track of every ITD (or SITD) that's linked, and holds enough | |
34 | * pre-calculated schedule data to make appending to the queue be quick. | |
35 | */ | |
36 | ||
37 | static int ehci_get_frame (struct usb_hcd *hcd); | |
38 | ||
39 | /*-------------------------------------------------------------------------*/ | |
40 | ||
41 | /* | |
42 | * periodic_next_shadow - return "next" pointer on shadow list | |
43 | * @periodic: host pointer to qh/itd/sitd | |
44 | * @tag: hardware tag for type of this record | |
45 | */ | |
46 | static union ehci_shadow * | |
47 | periodic_next_shadow (union ehci_shadow *periodic, __le32 tag) | |
48 | { | |
49 | switch (tag) { | |
50 | case Q_TYPE_QH: | |
51 | return &periodic->qh->qh_next; | |
52 | case Q_TYPE_FSTN: | |
53 | return &periodic->fstn->fstn_next; | |
54 | case Q_TYPE_ITD: | |
55 | return &periodic->itd->itd_next; | |
56 | // case Q_TYPE_SITD: | |
57 | default: | |
58 | return &periodic->sitd->sitd_next; | |
59 | } | |
60 | } | |
61 | ||
62 | /* caller must hold ehci->lock */ | |
63 | static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr) | |
64 | { | |
65 | union ehci_shadow *prev_p = &ehci->pshadow [frame]; | |
66 | __le32 *hw_p = &ehci->periodic [frame]; | |
67 | union ehci_shadow here = *prev_p; | |
68 | ||
69 | /* find predecessor of "ptr"; hw and shadow lists are in sync */ | |
70 | while (here.ptr && here.ptr != ptr) { | |
71 | prev_p = periodic_next_shadow (prev_p, Q_NEXT_TYPE (*hw_p)); | |
72 | hw_p = here.hw_next; | |
73 | here = *prev_p; | |
74 | } | |
75 | /* an interrupt entry (at list end) could have been shared */ | |
76 | if (!here.ptr) | |
77 | return; | |
78 | ||
79 | /* update shadow and hardware lists ... the old "next" pointers | |
80 | * from ptr may still be in use, the caller updates them. | |
81 | */ | |
82 | *prev_p = *periodic_next_shadow (&here, Q_NEXT_TYPE (*hw_p)); | |
83 | *hw_p = *here.hw_next; | |
84 | } | |
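Editor's aside: the unlink above walks two parallel lists in lockstep. A minimal standalone sketch of that idea (editor's addition; single-type nodes and helper names such as unlink_entry are invented here, unlike the driver's qh/itd/sitd/fstn union):

#include <stdio.h>
#include <stdint.h>

/* Simplified stand-in: the controller follows hw_next (a DMA address in
 * the real driver), while the driver walks sw_next, an ordinary pointer
 * kept in the parallel "shadow" list.  Both are advanced together, the
 * way periodic_next_shadow()/periodic_unlink() do above.
 */
struct entry {
        uint32_t        hw_next;        /* what the hardware would read */
        struct entry    *sw_next;       /* what the driver walks */
        uint32_t        dma;            /* this node's pretend DMA address */
};

static void unlink_entry(uint32_t *hw_head, struct entry **sw_head,
                struct entry *victim)
{
        uint32_t *hw_p = hw_head;
        struct entry **prev = sw_head;

        while (*prev && *prev != victim) {
                hw_p = &(*prev)->hw_next;
                prev = &(*prev)->sw_next;
        }
        if (!*prev)
                return;                 /* not linked in this frame */
        *prev = victim->sw_next;        /* patch the shadow list ... */
        *hw_p = victim->hw_next;        /* ... and the hardware list */
}

int main(void)
{
        struct entry c = { 0x0001, NULL, 0x3000 };
        struct entry b = { 0x3000, &c,   0x2000 };
        struct entry a = { 0x2000, &b,   0x1000 };
        uint32_t hw_head = 0x1000;
        struct entry *sw_head = &a;

        unlink_entry(&hw_head, &sw_head, &b);
        printf("a.hw_next=0x%04x, a.sw_next points at c: %d\n",
                        (unsigned) a.hw_next, a.sw_next == &c);
        return 0;
}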
85 | ||
86 | /* how many of the uframe's 125 usecs are allocated? */ | |
87 | static unsigned short | |
88 | periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe) | |
89 | { | |
90 | __le32 *hw_p = &ehci->periodic [frame]; | |
91 | union ehci_shadow *q = &ehci->pshadow [frame]; | |
92 | unsigned usecs = 0; | |
93 | ||
94 | while (q->ptr) { | |
95 | switch (Q_NEXT_TYPE (*hw_p)) { | |
96 | case Q_TYPE_QH: | |
97 | /* is it in the S-mask? */ | |
98 | if (q->qh->hw_info2 & cpu_to_le32 (1 << uframe)) | |
99 | usecs += q->qh->usecs; | |
100 | /* ... or C-mask? */ | |
101 | if (q->qh->hw_info2 & cpu_to_le32 (1 << (8 + uframe))) | |
102 | usecs += q->qh->c_usecs; | |
103 | hw_p = &q->qh->hw_next; | |
104 | q = &q->qh->qh_next; | |
105 | break; | |
106 | // case Q_TYPE_FSTN: | |
107 | default: | |
108 | /* for "save place" FSTNs, count the relevant INTR | |
109 | * bandwidth from the previous frame | |
110 | */ | |
111 | if (q->fstn->hw_prev != EHCI_LIST_END) { | |
112 | ehci_dbg (ehci, "ignoring FSTN cost ...\n"); | |
113 | } | |
114 | hw_p = &q->fstn->hw_next; | |
115 | q = &q->fstn->fstn_next; | |
116 | break; | |
117 | case Q_TYPE_ITD: | |
118 | usecs += q->itd->usecs [uframe]; | |
119 | hw_p = &q->itd->hw_next; | |
120 | q = &q->itd->itd_next; | |
121 | break; | |
122 | case Q_TYPE_SITD: | |
123 | /* is it in the S-mask? (count SPLIT, DATA) */ | |
124 | if (q->sitd->hw_uframe & cpu_to_le32 (1 << uframe)) { | |
125 | if (q->sitd->hw_fullspeed_ep & | |
126 | __constant_cpu_to_le32 (1<<31)) | |
127 | usecs += q->sitd->stream->usecs; | |
128 | else /* worst case for OUT start-split */ | |
129 | usecs += HS_USECS_ISO (188); | |
130 | } | |
131 | ||
132 | /* ... C-mask? (count CSPLIT, DATA) */ | |
133 | if (q->sitd->hw_uframe & | |
134 | cpu_to_le32 (1 << (8 + uframe))) { | |
135 | /* worst case for IN complete-split */ | |
136 | usecs += q->sitd->stream->c_usecs; | |
137 | } | |
138 | ||
139 | hw_p = &q->sitd->hw_next; | |
140 | q = &q->sitd->sitd_next; | |
141 | break; | |
142 | } | |
143 | } | |
144 | #ifdef DEBUG | |
145 | if (usecs > 100) | |
146 | ehci_err (ehci, "uframe %d sched overrun: %d usecs\n", | |
147 | frame * 8 + uframe, usecs); | |
148 | #endif | |
149 | return usecs; | |
150 | } | |
151 | ||
152 | /*-------------------------------------------------------------------------*/ | |
153 | ||
154 | static int same_tt (struct usb_device *dev1, struct usb_device *dev2) | |
155 | { | |
156 | if (!dev1->tt || !dev2->tt) | |
157 | return 0; | |
158 | if (dev1->tt != dev2->tt) | |
159 | return 0; | |
160 | if (dev1->tt->multi) | |
161 | return dev1->ttport == dev2->ttport; | |
162 | else | |
163 | return 1; | |
164 | } | |
165 | ||
166 | /* return true iff the device's transaction translator is available | |
167 | * for a periodic transfer starting at the specified frame, using | |
168 | * all the uframes in the mask. | |
169 | */ | |
170 | static int tt_no_collision ( | |
171 | struct ehci_hcd *ehci, | |
172 | unsigned period, | |
173 | struct usb_device *dev, | |
174 | unsigned frame, | |
175 | u32 uf_mask | |
176 | ) | |
177 | { | |
178 | if (period == 0) /* error */ | |
179 | return 0; | |
180 | ||
181 | /* note bandwidth wastage: split never follows csplit | |
182 | * (different dev or endpoint) until the next uframe. | |
183 | * calling convention doesn't make that distinction. | |
184 | */ | |
185 | for (; frame < ehci->periodic_size; frame += period) { | |
186 | union ehci_shadow here; | |
187 | __le32 type; | |
188 | ||
189 | here = ehci->pshadow [frame]; | |
190 | type = Q_NEXT_TYPE (ehci->periodic [frame]); | |
191 | while (here.ptr) { | |
192 | switch (type) { | |
193 | case Q_TYPE_ITD: | |
194 | type = Q_NEXT_TYPE (here.itd->hw_next); | |
195 | here = here.itd->itd_next; | |
196 | continue; | |
197 | case Q_TYPE_QH: | |
198 | if (same_tt (dev, here.qh->dev)) { | |
199 | u32 mask; | |
200 | ||
201 | mask = le32_to_cpu (here.qh->hw_info2); | |
202 | /* "knows" no gap is needed */ | |
203 | mask |= mask >> 8; | |
204 | if (mask & uf_mask) | |
205 | break; | |
206 | } | |
207 | type = Q_NEXT_TYPE (here.qh->hw_next); | |
208 | here = here.qh->qh_next; | |
209 | continue; | |
210 | case Q_TYPE_SITD: | |
211 | if (same_tt (dev, here.sitd->urb->dev)) { | |
212 | u16 mask; | |
213 | ||
214 | mask = le32_to_cpu (here.sitd | |
215 | ->hw_uframe); | |
216 | /* FIXME assumes no gap for IN! */ | |
217 | mask |= mask >> 8; | |
218 | if (mask & uf_mask) | |
219 | break; | |
220 | } | |
221 | type = Q_NEXT_TYPE (here.sitd->hw_next); | |
222 | here = here.sitd->sitd_next; | |
223 | continue; | |
224 | // case Q_TYPE_FSTN: | |
225 | default: | |
226 | ehci_dbg (ehci, | |
227 | "periodic frame %d bogus type %d\n", | |
228 | frame, type); | |
229 | } | |
230 | ||
231 | /* collision or error */ | |
232 | return 0; | |
233 | } | |
234 | } | |
235 | ||
236 | /* no collision */ | |
237 | return 1; | |
238 | } | |
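Editor's aside: a small worked example of the mask convention used here. The caller (check_intr_schedule(), further down) passes one SSPLIT bit plus two CSPLIT bits, and existing entries fold their C-mask onto the S-mask with "mask |= mask >> 8" before the overlap test. The values below are illustrative only:

#include <stdio.h>

int main(void)
{
        unsigned uframe = 1, gap_uf = 2;

        /* candidate transfer: CSPLITs in uframes (uframe + gap_uf) and
         * (uframe + gap_uf + 1), plus the SSPLIT uframe itself
         */
        unsigned uf_mask = 0x03 << (uframe + gap_uf);
        uf_mask |= 1 << uframe;

        /* an already-scheduled entry: S-mask in bits 0..7, C-mask in
         * bits 8..15; folding mirrors "mask |= mask >> 8" above
         */
        unsigned hw_mask = (0x18 << 8) | 0x02;
        unsigned folded = (hw_mask | (hw_mask >> 8)) & 0xff;

        printf("candidate uf_mask = 0x%02x\n", uf_mask);        /* 0x1a */
        printf("existing, folded  = 0x%02x\n", folded);         /* 0x1a */
        printf("TT collision: %s\n", (uf_mask & folded) ? "yes" : "no");
        return 0;
}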
239 | ||
240 | /*-------------------------------------------------------------------------*/ | |
241 | ||
242 | static int enable_periodic (struct ehci_hcd *ehci) | |
243 | { | |
244 | u32 cmd; | |
245 | int status; | |
246 | ||
247 | /* did clearing PSE take effect yet? | |
248 | * takes effect only at frame boundaries... | |
249 | */ | |
250 | status = handshake (&ehci->regs->status, STS_PSS, 0, 9 * 125); | |
251 | if (status != 0) { | |
252 | ehci_to_hcd(ehci)->state = HC_STATE_HALT; | |
253 | return status; | |
254 | } | |
255 | ||
256 | cmd = readl (&ehci->regs->command) | CMD_PSE; | |
257 | writel (cmd, &ehci->regs->command); | |
258 | /* posted write ... PSS happens later */ | |
259 | ehci_to_hcd(ehci)->state = HC_STATE_RUNNING; | |
260 | ||
261 | /* make sure ehci_work scans these */ | |
262 | ehci->next_uframe = readl (&ehci->regs->frame_index) | |
263 | % (ehci->periodic_size << 3); | |
264 | return 0; | |
265 | } | |
266 | ||
267 | static int disable_periodic (struct ehci_hcd *ehci) | |
268 | { | |
269 | u32 cmd; | |
270 | int status; | |
271 | ||
272 | /* did setting PSE not take effect yet? | |
273 | * takes effect only at frame boundaries... | |
274 | */ | |
275 | status = handshake (&ehci->regs->status, STS_PSS, STS_PSS, 9 * 125); | |
276 | if (status != 0) { | |
277 | ehci_to_hcd(ehci)->state = HC_STATE_HALT; | |
278 | return status; | |
279 | } | |
280 | ||
281 | cmd = readl (&ehci->regs->command) & ~CMD_PSE; | |
282 | writel (cmd, &ehci->regs->command); | |
283 | /* posted write ... */ | |
284 | ||
285 | ehci->next_uframe = -1; | |
286 | return 0; | |
287 | } | |
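Editor's aside: a rough userspace analog of the handshake() polling that enable_periodic()/disable_periodic() rely on. The real helper lives elsewhere in ehci-hcd.c, reads the memory-mapped USBSTS register and udelay()s between reads; poll_until() and the fake status word below are this sketch's own inventions:

#include <stdio.h>

#define PSS     (1 << 14)       /* Periodic Schedule Status, USBSTS bit 14 */

/* Spin until (*reg & mask) == done, giving up after 'usec' iterations. */
static int poll_until(volatile unsigned *reg, unsigned mask,
                unsigned done, int usec)
{
        do {
                if ((*reg & mask) == done)
                        return 0;
        } while (--usec > 0);
        return -1;              /* the driver returns -ETIMEDOUT here */
}

int main(void)
{
        unsigned fake_status = PSS;     /* pretend the schedule is running */

        /* disable_periodic() waits for PSS set; enable_periodic() for clear */
        printf("%d\n", poll_until(&fake_status, PSS, PSS, 9 * 125));    /* 0 */
        printf("%d\n", poll_until(&fake_status, PSS, 0, 9 * 125));      /* -1 */
        return 0;
}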
288 | ||
289 | /*-------------------------------------------------------------------------*/ | |
290 | ||
291 | /* periodic schedule slots have iso tds (normal or split) first, then a | |
292 | * sparse tree for active interrupt transfers. | |
293 | * | |
294 | * this just links in a qh; caller guarantees uframe masks are set right. | |
295 | * no FSTN support (yet; ehci 0.96+) | |
296 | */ | |
297 | static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh) | |
298 | { | |
299 | unsigned i; | |
300 | unsigned period = qh->period; | |
301 | ||
302 | dev_dbg (&qh->dev->dev, | |
303 | "link qh%d-%04x/%p start %d [%d/%d us]\n", | |
7dedacf4 | 304 | period, le32_to_cpup (&qh->hw_info2) & (QH_CMASK | QH_SMASK), |
1da177e4 LT |
305 | qh, qh->start, qh->usecs, qh->c_usecs); |
306 | ||
307 | /* high bandwidth, or otherwise every microframe */ | |
308 | if (period == 0) | |
309 | period = 1; | |
310 | ||
311 | for (i = qh->start; i < ehci->periodic_size; i += period) { | |
312 | union ehci_shadow *prev = &ehci->pshadow [i]; | |
9a5d3e98 | 313 | __le32 *hw_p = &ehci->periodic [i]; |
1da177e4 | 314 | union ehci_shadow here = *prev; |
9a5d3e98 | 315 | __le32 type = 0; |
1da177e4 LT |
316 | |
317 | /* skip the iso nodes at list head */ | |
318 | while (here.ptr) { | |
319 | type = Q_NEXT_TYPE (*hw_p); | |
320 | if (type == Q_TYPE_QH) | |
321 | break; | |
322 | prev = periodic_next_shadow (prev, type); | |
323 | hw_p = &here.qh->hw_next; | |
324 | here = *prev; | |
325 | } | |
326 | ||
327 | /* sorting each branch by period (slow-->fast) | |
328 | * enables sharing interior tree nodes | |
329 | */ | |
330 | while (here.ptr && qh != here.qh) { | |
331 | if (qh->period > here.qh->period) | |
332 | break; | |
333 | prev = &here.qh->qh_next; | |
334 | hw_p = &here.qh->hw_next; | |
335 | here = *prev; | |
336 | } | |
337 | /* link in this qh, unless some earlier pass did that */ | |
338 | if (qh != here.qh) { | |
339 | qh->qh_next = here; | |
340 | if (here.qh) | |
341 | qh->hw_next = *hw_p; | |
342 | wmb (); | |
343 | prev->qh = qh; | |
344 | *hw_p = QH_NEXT (qh->qh_dma); | |
345 | } | |
346 | } | |
347 | qh->qh_state = QH_STATE_LINKED; | |
348 | qh_get (qh); | |
349 | ||
350 | /* update per-qh bandwidth for usbfs */ | |
351 | ehci_to_hcd(ehci)->self.bandwidth_allocated += qh->period | |
352 | ? ((qh->usecs + qh->c_usecs) / qh->period) | |
353 | : (qh->usecs * 8); | |
354 | ||
355 | /* maybe enable periodic schedule processing */ | |
356 | if (!ehci->periodic_sched++) | |
357 | return enable_periodic (ehci); | |
358 | ||
359 | return 0; | |
360 | } | |
361 | ||
362 | static void qh_unlink_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh) | |
363 | { | |
364 | unsigned i; | |
365 | unsigned period; | |
366 | ||
367 | // FIXME: | |
368 | // IF this isn't high speed | |
369 | // and this qh is active in the current uframe | |
370 | // (and overlay token SplitXstate is false?) | |
371 | // THEN | |
372 | // qh->hw_info1 |= __constant_cpu_to_le32 (1 << 7 /* "ignore" */); | |
373 | ||
374 | /* high bandwidth, or otherwise part of every microframe */ | |
375 | if ((period = qh->period) == 0) | |
376 | period = 1; | |
377 | ||
378 | for (i = qh->start; i < ehci->periodic_size; i += period) | |
379 | periodic_unlink (ehci, i, qh); | |
380 | ||
381 | /* update per-qh bandwidth for usbfs */ | |
382 | ehci_to_hcd(ehci)->self.bandwidth_allocated -= qh->period | |
383 | ? ((qh->usecs + qh->c_usecs) / qh->period) | |
384 | : (qh->usecs * 8); | |
385 | ||
386 | dev_dbg (&qh->dev->dev, | |
387 | "unlink qh%d-%04x/%p start %d [%d/%d us]\n", | |
7dedacf4 DB |
388 | qh->period, |
389 | le32_to_cpup (&qh->hw_info2) & (QH_CMASK | QH_SMASK), | |
1da177e4 LT |
390 | qh, qh->start, qh->usecs, qh->c_usecs); |
391 | ||
392 | /* qh->qh_next still "live" to HC */ | |
393 | qh->qh_state = QH_STATE_UNLINK; | |
394 | qh->qh_next.ptr = NULL; | |
395 | qh_put (qh); | |
396 | ||
397 | /* maybe turn off periodic schedule */ | |
398 | ehci->periodic_sched--; | |
399 | if (!ehci->periodic_sched) | |
400 | (void) disable_periodic (ehci); | |
401 | } | |
402 | ||
403 | static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh) | |
404 | { | |
405 | unsigned wait; | |
406 | ||
407 | qh_unlink_periodic (ehci, qh); | |
408 | ||
409 | /* simple/paranoid: always delay, expecting the HC needs to read | |
410 | * qh->hw_next or finish a writeback after SPLIT/CSPLIT ... and | |
411 | * expect khubd to clean up after any CSPLITs we won't issue. | |
412 | * active high speed queues may need bigger delays... | |
413 | */ | |
414 | if (list_empty (&qh->qtd_list) | |
7dedacf4 | 415 | || (__constant_cpu_to_le32 (QH_CMASK) |
1da177e4 LT |
416 | & qh->hw_info2) != 0) |
417 | wait = 2; | |
418 | else | |
419 | wait = 55; /* worst case: 3 * 1024 */ | |
420 | ||
421 | udelay (wait); | |
422 | qh->qh_state = QH_STATE_IDLE; | |
423 | qh->hw_next = EHCI_LIST_END; | |
424 | wmb (); | |
425 | } | |
426 | ||
427 | /*-------------------------------------------------------------------------*/ | |
428 | ||
429 | static int check_period ( | |
430 | struct ehci_hcd *ehci, | |
431 | unsigned frame, | |
432 | unsigned uframe, | |
433 | unsigned period, | |
434 | unsigned usecs | |
435 | ) { | |
436 | int claimed; | |
437 | ||
438 | /* complete split running into next frame? | |
439 | * given FSTN support, we could sometimes check... | |
440 | */ | |
441 | if (uframe >= 8) | |
442 | return 0; | |
443 | ||
444 | /* | |
445 | * 80% periodic == 100 usec/uframe available | |
446 | * convert "usecs we need" to "max already claimed" | |
447 | */ | |
448 | usecs = 100 - usecs; | |
449 | ||
450 | /* we "know" 2 and 4 uframe intervals were rejected; so | |
451 | * for period 0, check _every_ microframe in the schedule. | |
452 | */ | |
453 | if (unlikely (period == 0)) { | |
454 | do { | |
455 | for (uframe = 0; uframe < 7; uframe++) { | |
456 | claimed = periodic_usecs (ehci, frame, uframe); | |
457 | if (claimed > usecs) | |
458 | return 0; | |
459 | } | |
460 | } while ((frame += 1) < ehci->periodic_size); | |
461 | ||
462 | /* just check the specified uframe, at that period */ | |
463 | } else { | |
464 | do { | |
465 | claimed = periodic_usecs (ehci, frame, uframe); | |
466 | if (claimed > usecs) | |
467 | return 0; | |
468 | } while ((frame += period) < ehci->periodic_size); | |
469 | } | |
470 | ||
471 | // success! | |
472 | return 1; | |
473 | } | |
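Editor's aside: a worked example of the 80% rule that check_period() (and itd_slot_ok(), later) applies: each microframe is 125 usec, of which at most 100 usec may go to periodic transfers. The helper name is hypothetical; it just restates the "usecs = 100 - usecs" inversion directly:

#include <stdio.h>

static int uframe_fits(unsigned claimed_us, unsigned needed_us)
{
        /* equivalent to check_period()'s "claimed > (100 - needed)" test */
        return needed_us <= 100 && claimed_us <= 100 - needed_us;
}

int main(void)
{
        printf("%d\n", uframe_fits(60, 30));    /* 1: 60 + 30 <= 100 usec */
        printf("%d\n", uframe_fits(80, 30));    /* 0: would exceed the budget */
        return 0;
}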
474 | ||
475 | static int check_intr_schedule ( | |
476 | struct ehci_hcd *ehci, | |
477 | unsigned frame, | |
478 | unsigned uframe, | |
479 | const struct ehci_qh *qh, | |
480 | __le32 *c_maskp | |
481 | ) | |
482 | { | |
483 | int retval = -ENOSPC; | |
484 | u8 mask; | |
485 | ||
486 | if (qh->c_usecs && uframe >= 6) /* FSTN territory? */ | |
487 | goto done; | |
488 | ||
489 | if (!check_period (ehci, frame, uframe, qh->period, qh->usecs)) | |
490 | goto done; | |
491 | if (!qh->c_usecs) { | |
492 | retval = 0; | |
493 | *c_maskp = 0; | |
494 | goto done; | |
495 | } | |
496 | ||
497 | /* Make sure this tt's buffer is also available for CSPLITs. | |
498 | * We pessimize a bit; probably the typical full speed case | |
499 | * doesn't need the second CSPLIT. | |
500 | * | |
501 | * NOTE: both SPLIT and CSPLIT could be checked in just | |
502 | * one smart pass... | |
503 | */ | |
504 | mask = 0x03 << (uframe + qh->gap_uf); | |
505 | *c_maskp = cpu_to_le32 (mask << 8); | |
506 | ||
507 | mask |= 1 << uframe; | |
508 | if (tt_no_collision (ehci, qh->period, qh->dev, frame, mask)) { | |
509 | if (!check_period (ehci, frame, uframe + qh->gap_uf + 1, | |
510 | qh->period, qh->c_usecs)) | |
511 | goto done; | |
512 | if (!check_period (ehci, frame, uframe + qh->gap_uf, | |
513 | qh->period, qh->c_usecs)) | |
514 | goto done; | |
515 | retval = 0; | |
516 | } | |
517 | done: | |
518 | return retval; | |
519 | } | |
520 | ||
521 | /* "first fit" scheduling policy used the first time through, | |
522 | * or when the previous schedule slot can't be re-used. | |
523 | */ | |
524 | static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh) | |
525 | { | |
526 | int status; | |
527 | unsigned uframe; | |
528 | __le32 c_mask; | |
529 | unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */ | |
530 | ||
531 | qh_refresh(ehci, qh); | |
532 | qh->hw_next = EHCI_LIST_END; | |
533 | frame = qh->start; | |
534 | ||
535 | /* reuse the previous schedule slots, if we can */ | |
536 | if (frame < qh->period) { | |
7dedacf4 | 537 | uframe = ffs (le32_to_cpup (&qh->hw_info2) & QH_SMASK); |
1da177e4 LT |
538 | status = check_intr_schedule (ehci, frame, --uframe, |
539 | qh, &c_mask); | |
540 | } else { | |
541 | uframe = 0; | |
542 | c_mask = 0; | |
543 | status = -ENOSPC; | |
544 | } | |
545 | ||
546 | /* else scan the schedule to find a group of slots such that all | |
547 | * uframes have enough periodic bandwidth available. | |
548 | */ | |
549 | if (status) { | |
550 | /* "normal" case, uframing flexible except with splits */ | |
551 | if (qh->period) { | |
552 | frame = qh->period - 1; | |
553 | do { | |
554 | for (uframe = 0; uframe < 8; uframe++) { | |
555 | status = check_intr_schedule (ehci, | |
556 | frame, uframe, qh, | |
557 | &c_mask); | |
558 | if (status == 0) | |
559 | break; | |
560 | } | |
561 | } while (status && frame--); | |
562 | ||
563 | /* qh->period == 0 means every uframe */ | |
564 | } else { | |
565 | frame = 0; | |
566 | status = check_intr_schedule (ehci, 0, 0, qh, &c_mask); | |
567 | } | |
568 | if (status) | |
569 | goto done; | |
570 | qh->start = frame; | |
571 | ||
572 | /* reset S-frame and (maybe) C-frame masks */ | |
7dedacf4 | 573 | qh->hw_info2 &= __constant_cpu_to_le32(~(QH_CMASK | QH_SMASK)); |
1da177e4 LT |
574 | qh->hw_info2 |= qh->period |
575 | ? cpu_to_le32 (1 << uframe) | |
7dedacf4 | 576 | : __constant_cpu_to_le32 (QH_SMASK); |
1da177e4 LT |
577 | qh->hw_info2 |= c_mask; |
578 | } else | |
579 | ehci_dbg (ehci, "reused qh %p schedule\n", qh); | |
580 | ||
581 | /* stuff into the periodic schedule */ | |
582 | status = qh_link_periodic (ehci, qh); | |
583 | done: | |
584 | return status; | |
585 | } | |
586 | ||
587 | static int intr_submit ( | |
588 | struct ehci_hcd *ehci, | |
589 | struct usb_host_endpoint *ep, | |
590 | struct urb *urb, | |
591 | struct list_head *qtd_list, | |
5db539e4 | 592 | unsigned mem_flags |
1da177e4 LT |
593 | ) { |
594 | unsigned epnum; | |
595 | unsigned long flags; | |
596 | struct ehci_qh *qh; | |
597 | int status = 0; | |
598 | struct list_head empty; | |
599 | ||
600 | /* get endpoint and transfer/schedule data */ | |
601 | epnum = ep->desc.bEndpointAddress; | |
602 | ||
603 | spin_lock_irqsave (&ehci->lock, flags); | |
604 | ||
605 | /* get qh and force any scheduling errors */ | |
606 | INIT_LIST_HEAD (&empty); | |
607 | qh = qh_append_tds (ehci, urb, &empty, epnum, &ep->hcpriv); | |
608 | if (qh == NULL) { | |
609 | status = -ENOMEM; | |
610 | goto done; | |
611 | } | |
612 | if (qh->qh_state == QH_STATE_IDLE) { | |
613 | if ((status = qh_schedule (ehci, qh)) != 0) | |
614 | goto done; | |
615 | } | |
616 | ||
617 | /* then queue the urb's tds to the qh */ | |
618 | qh = qh_append_tds (ehci, urb, qtd_list, epnum, &ep->hcpriv); | |
619 | BUG_ON (qh == NULL); | |
620 | ||
621 | /* ... update usbfs periodic stats */ | |
622 | ehci_to_hcd(ehci)->self.bandwidth_int_reqs++; | |
623 | ||
624 | done: | |
625 | spin_unlock_irqrestore (&ehci->lock, flags); | |
626 | if (status) | |
627 | qtd_list_free (ehci, urb, qtd_list); | |
628 | ||
629 | return status; | |
630 | } | |
631 | ||
632 | /*-------------------------------------------------------------------------*/ | |
633 | ||
634 | /* ehci_iso_stream ops work with both ITD and SITD */ | |
635 | ||
636 | static struct ehci_iso_stream * | |
5db539e4 | 637 | iso_stream_alloc (unsigned mem_flags) |
1da177e4 LT |
638 | { |
639 | struct ehci_iso_stream *stream; | |
640 | ||
7b842b6e | 641 | stream = kzalloc(sizeof *stream, mem_flags); |
1da177e4 | 642 | if (likely (stream != NULL)) { |
1da177e4 LT |
643 | INIT_LIST_HEAD(&stream->td_list); |
644 | INIT_LIST_HEAD(&stream->free_list); | |
645 | stream->next_uframe = -1; | |
646 | stream->refcount = 1; | |
647 | } | |
648 | return stream; | |
649 | } | |
650 | ||
651 | static void | |
652 | iso_stream_init ( | |
653 | struct ehci_hcd *ehci, | |
654 | struct ehci_iso_stream *stream, | |
655 | struct usb_device *dev, | |
656 | int pipe, | |
657 | unsigned interval | |
658 | ) | |
659 | { | |
660 | static const u8 smask_out [] = { 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f }; | |
661 | ||
662 | u32 buf1; | |
663 | unsigned epnum, maxp; | |
664 | int is_input; | |
665 | long bandwidth; | |
666 | ||
667 | /* | |
668 | * this might be a "high bandwidth" highspeed endpoint, | |
669 | * as encoded in the ep descriptor's wMaxPacket field | |
670 | */ | |
671 | epnum = usb_pipeendpoint (pipe); | |
672 | is_input = usb_pipein (pipe) ? USB_DIR_IN : 0; | |
673 | maxp = usb_maxpacket(dev, pipe, !is_input); | |
674 | if (is_input) { | |
675 | buf1 = (1 << 11); | |
676 | } else { | |
677 | buf1 = 0; | |
678 | } | |
679 | ||
680 | /* knows about ITD vs SITD */ | |
681 | if (dev->speed == USB_SPEED_HIGH) { | |
682 | unsigned multi = hb_mult(maxp); | |
683 | ||
684 | stream->highspeed = 1; | |
685 | ||
686 | maxp = max_packet(maxp); | |
687 | buf1 |= maxp; | |
688 | maxp *= multi; | |
689 | ||
690 | stream->buf0 = cpu_to_le32 ((epnum << 8) | dev->devnum); | |
691 | stream->buf1 = cpu_to_le32 (buf1); | |
692 | stream->buf2 = cpu_to_le32 (multi); | |
693 | ||
694 | /* usbfs wants to report the average usecs per frame tied up | |
695 | * when transfers on this endpoint are scheduled ... | |
696 | */ | |
697 | stream->usecs = HS_USECS_ISO (maxp); | |
698 | bandwidth = stream->usecs * 8; | |
699 | bandwidth /= 1 << (interval - 1); | |
700 | ||
701 | } else { | |
702 | u32 addr; | |
703 | ||
704 | addr = dev->ttport << 24; | |
705 | if (!ehci_is_TDI(ehci) | |
706 | || (dev->tt->hub != | |
707 | ehci_to_hcd(ehci)->self.root_hub)) | |
708 | addr |= dev->tt->hub->devnum << 16; | |
709 | addr |= epnum << 8; | |
710 | addr |= dev->devnum; | |
711 | stream->usecs = HS_USECS_ISO (maxp); | |
712 | if (is_input) { | |
713 | u32 tmp; | |
714 | ||
715 | addr |= 1 << 31; | |
716 | stream->c_usecs = stream->usecs; | |
717 | stream->usecs = HS_USECS_ISO (1); | |
718 | stream->raw_mask = 1; | |
719 | ||
720 | /* pessimistic c-mask */ | |
721 | tmp = usb_calc_bus_time (USB_SPEED_FULL, 1, 0, maxp) | |
722 | / (125 * 1000); | |
723 | stream->raw_mask |= 3 << (tmp + 9); | |
724 | } else | |
725 | stream->raw_mask = smask_out [maxp / 188]; | |
726 | bandwidth = stream->usecs + stream->c_usecs; | |
727 | bandwidth /= 1 << (interval + 2); | |
728 | ||
729 | /* stream->splits gets created from raw_mask later */ | |
730 | stream->address = cpu_to_le32 (addr); | |
731 | } | |
732 | stream->bandwidth = bandwidth; | |
733 | ||
734 | stream->udev = dev; | |
735 | ||
736 | stream->bEndpointAddress = is_input | epnum; | |
737 | stream->interval = interval; | |
738 | stream->maxp = maxp; | |
739 | } | |
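Editor's aside: a standalone example of the high-bandwidth wMaxPacketSize decoding used above. The driver gets max_packet() and hb_mult() from its own headers; they are re-stated here only so the sketch compiles on its own, following the USB 2.0 field layout (bits 10..0 = packet size, bits 12..11 = extra transactions per microframe):

#include <stdio.h>

#define max_packet(wmaxp)       ((wmaxp) & 0x07ff)
#define hb_mult(wmaxp)          (1 + (((wmaxp) >> 11) & 0x03))

int main(void)
{
        unsigned wmaxp = 0x1400;        /* 1024-byte packets, 2 extra transactions */
        unsigned maxp = max_packet(wmaxp);
        unsigned mult = hb_mult(wmaxp);

        printf("%u bytes x %u per uframe = %u bytes per uframe\n",
                        maxp, mult, maxp * mult);       /* 1024 x 3 = 3072 */
        return 0;
}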
740 | ||
741 | static void | |
742 | iso_stream_put(struct ehci_hcd *ehci, struct ehci_iso_stream *stream) | |
743 | { | |
744 | stream->refcount--; | |
745 | ||
746 | /* free whenever just a dev->ep reference remains. | |
747 | * not like a QH -- no persistent state (toggle, halt) | |
748 | */ | |
749 | if (stream->refcount == 1) { | |
750 | int is_in; | |
751 | ||
752 | // BUG_ON (!list_empty(&stream->td_list)); | |
753 | ||
754 | while (!list_empty (&stream->free_list)) { | |
755 | struct list_head *entry; | |
756 | ||
757 | entry = stream->free_list.next; | |
758 | list_del (entry); | |
759 | ||
760 | /* knows about ITD vs SITD */ | |
761 | if (stream->highspeed) { | |
762 | struct ehci_itd *itd; | |
763 | ||
764 | itd = list_entry (entry, struct ehci_itd, | |
765 | itd_list); | |
766 | dma_pool_free (ehci->itd_pool, itd, | |
767 | itd->itd_dma); | |
768 | } else { | |
769 | struct ehci_sitd *sitd; | |
770 | ||
771 | sitd = list_entry (entry, struct ehci_sitd, | |
772 | sitd_list); | |
773 | dma_pool_free (ehci->sitd_pool, sitd, | |
774 | sitd->sitd_dma); | |
775 | } | |
776 | } | |
777 | ||
778 | is_in = (stream->bEndpointAddress & USB_DIR_IN) ? 0x10 : 0; | |
779 | stream->bEndpointAddress &= 0x0f; | |
780 | stream->ep->hcpriv = NULL; | |
781 | ||
782 | if (stream->rescheduled) { | |
783 | ehci_info (ehci, "ep%d%s-iso rescheduled " | |
784 | "%lu times in %lu seconds\n", | |
785 | stream->bEndpointAddress, is_in ? "in" : "out", | |
786 | stream->rescheduled, | |
787 | ((jiffies - stream->start)/HZ) | |
788 | ); | |
789 | } | |
790 | ||
791 | kfree(stream); | |
792 | } | |
793 | } | |
794 | ||
795 | static inline struct ehci_iso_stream * | |
796 | iso_stream_get (struct ehci_iso_stream *stream) | |
797 | { | |
798 | if (likely (stream != NULL)) | |
799 | stream->refcount++; | |
800 | return stream; | |
801 | } | |
802 | ||
803 | static struct ehci_iso_stream * | |
804 | iso_stream_find (struct ehci_hcd *ehci, struct urb *urb) | |
805 | { | |
806 | unsigned epnum; | |
807 | struct ehci_iso_stream *stream; | |
808 | struct usb_host_endpoint *ep; | |
809 | unsigned long flags; | |
810 | ||
811 | epnum = usb_pipeendpoint (urb->pipe); | |
812 | if (usb_pipein(urb->pipe)) | |
813 | ep = urb->dev->ep_in[epnum]; | |
814 | else | |
815 | ep = urb->dev->ep_out[epnum]; | |
816 | ||
817 | spin_lock_irqsave (&ehci->lock, flags); | |
818 | stream = ep->hcpriv; | |
819 | ||
820 | if (unlikely (stream == NULL)) { | |
821 | stream = iso_stream_alloc(GFP_ATOMIC); | |
822 | if (likely (stream != NULL)) { | |
823 | /* dev->ep owns the initial refcount */ | |
824 | ep->hcpriv = stream; | |
825 | stream->ep = ep; | |
826 | iso_stream_init(ehci, stream, urb->dev, urb->pipe, | |
827 | urb->interval); | |
828 | } | |
829 | ||
830 | /* if dev->ep [epnum] is a QH, info1.maxpacket is nonzero */ | |
831 | } else if (unlikely (stream->hw_info1 != 0)) { | |
832 | ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n", | |
833 | urb->dev->devpath, epnum, | |
834 | usb_pipein(urb->pipe) ? "in" : "out"); | |
835 | stream = NULL; | |
836 | } | |
837 | ||
838 | /* caller guarantees an eventual matching iso_stream_put */ | |
839 | stream = iso_stream_get (stream); | |
840 | ||
841 | spin_unlock_irqrestore (&ehci->lock, flags); | |
842 | return stream; | |
843 | } | |
844 | ||
845 | /*-------------------------------------------------------------------------*/ | |
846 | ||
847 | /* ehci_iso_sched ops can be ITD-only or SITD-only */ | |
848 | ||
849 | static struct ehci_iso_sched * | |
5db539e4 | 850 | iso_sched_alloc (unsigned packets, unsigned mem_flags) |
1da177e4 LT |
851 | { |
852 | struct ehci_iso_sched *iso_sched; | |
853 | int size = sizeof *iso_sched; | |
854 | ||
855 | size += packets * sizeof (struct ehci_iso_packet); | |
856 | iso_sched = kmalloc (size, mem_flags); | |
857 | if (likely (iso_sched != NULL)) { | |
858 | memset(iso_sched, 0, size); | |
859 | INIT_LIST_HEAD (&iso_sched->td_list); | |
860 | } | |
861 | return iso_sched; | |
862 | } | |
863 | ||
864 | static inline void | |
865 | itd_sched_init ( | |
866 | struct ehci_iso_sched *iso_sched, | |
867 | struct ehci_iso_stream *stream, | |
868 | struct urb *urb | |
869 | ) | |
870 | { | |
871 | unsigned i; | |
872 | dma_addr_t dma = urb->transfer_dma; | |
873 | ||
874 | /* how many uframes are needed for these transfers */ | |
875 | iso_sched->span = urb->number_of_packets * stream->interval; | |
876 | ||
877 | /* figure out per-uframe itd fields that we'll need later | |
878 | * when we fit new itds into the schedule. | |
879 | */ | |
880 | for (i = 0; i < urb->number_of_packets; i++) { | |
881 | struct ehci_iso_packet *uframe = &iso_sched->packet [i]; | |
882 | unsigned length; | |
883 | dma_addr_t buf; | |
884 | u32 trans; | |
885 | ||
886 | length = urb->iso_frame_desc [i].length; | |
887 | buf = dma + urb->iso_frame_desc [i].offset; | |
888 | ||
889 | trans = EHCI_ISOC_ACTIVE; | |
890 | trans |= buf & 0x0fff; | |
891 | if (unlikely (((i + 1) == urb->number_of_packets)) | |
892 | && !(urb->transfer_flags & URB_NO_INTERRUPT)) | |
893 | trans |= EHCI_ITD_IOC; | |
894 | trans |= length << 16; | |
895 | uframe->transaction = cpu_to_le32 (trans); | |
896 | ||
77078570 | 897 | /* might need to cross a buffer page within a uframe */ |
1da177e4 LT |
898 | uframe->bufp = (buf & ~(u64)0x0fff); |
899 | buf += length; | |
900 | if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff)))) | |
901 | uframe->cross = 1; | |
902 | } | |
903 | } | |
904 | ||
905 | static void | |
906 | iso_sched_free ( | |
907 | struct ehci_iso_stream *stream, | |
908 | struct ehci_iso_sched *iso_sched | |
909 | ) | |
910 | { | |
911 | if (!iso_sched) | |
912 | return; | |
913 | // caller must hold ehci->lock! | |
914 | list_splice (&iso_sched->td_list, &stream->free_list); | |
915 | kfree (iso_sched); | |
916 | } | |
917 | ||
918 | static int | |
919 | itd_urb_transaction ( | |
920 | struct ehci_iso_stream *stream, | |
921 | struct ehci_hcd *ehci, | |
922 | struct urb *urb, | |
5db539e4 | 923 | unsigned mem_flags |
1da177e4 LT |
924 | ) |
925 | { | |
926 | struct ehci_itd *itd; | |
927 | dma_addr_t itd_dma; | |
928 | int i; | |
929 | unsigned num_itds; | |
930 | struct ehci_iso_sched *sched; | |
931 | unsigned long flags; | |
932 | ||
933 | sched = iso_sched_alloc (urb->number_of_packets, mem_flags); | |
934 | if (unlikely (sched == NULL)) | |
935 | return -ENOMEM; | |
936 | ||
937 | itd_sched_init (sched, stream, urb); | |
938 | ||
939 | if (urb->interval < 8) | |
940 | num_itds = 1 + (sched->span + 7) / 8; | |
941 | else | |
942 | num_itds = urb->number_of_packets; | |
943 | ||
944 | /* allocate/init ITDs */ | |
945 | spin_lock_irqsave (&ehci->lock, flags); | |
946 | for (i = 0; i < num_itds; i++) { | |
947 | ||
948 | /* free_list.next might be cache-hot ... but maybe | |
949 | * the HC caches it too. avoid that issue for now. | |
950 | */ | |
951 | ||
952 | /* prefer previously-allocated itds */ | |
953 | if (likely (!list_empty(&stream->free_list))) { | |
954 | itd = list_entry (stream->free_list.prev, | |
955 | struct ehci_itd, itd_list); | |
956 | list_del (&itd->itd_list); | |
957 | itd_dma = itd->itd_dma; | |
958 | } else | |
959 | itd = NULL; | |
960 | ||
961 | if (!itd) { | |
962 | spin_unlock_irqrestore (&ehci->lock, flags); | |
963 | itd = dma_pool_alloc (ehci->itd_pool, mem_flags, | |
964 | &itd_dma); | |
965 | spin_lock_irqsave (&ehci->lock, flags); | |
966 | } | |
967 | ||
968 | if (unlikely (NULL == itd)) { | |
969 | iso_sched_free (stream, sched); | |
970 | spin_unlock_irqrestore (&ehci->lock, flags); | |
971 | return -ENOMEM; | |
972 | } | |
973 | memset (itd, 0, sizeof *itd); | |
974 | itd->itd_dma = itd_dma; | |
975 | list_add (&itd->itd_list, &sched->td_list); | |
976 | } | |
977 | spin_unlock_irqrestore (&ehci->lock, flags); | |
978 | ||
979 | /* temporarily store schedule info in hcpriv */ | |
980 | urb->hcpriv = sched; | |
981 | urb->error_count = 0; | |
982 | return 0; | |
983 | } | |
984 | ||
985 | /*-------------------------------------------------------------------------*/ | |
986 | ||
987 | static inline int | |
988 | itd_slot_ok ( | |
989 | struct ehci_hcd *ehci, | |
990 | u32 mod, | |
991 | u32 uframe, | |
992 | u8 usecs, | |
993 | u32 period | |
994 | ) | |
995 | { | |
996 | uframe %= period; | |
997 | do { | |
998 | /* can't commit more than 80% periodic == 100 usec */ | |
999 | if (periodic_usecs (ehci, uframe >> 3, uframe & 0x7) | |
1000 | > (100 - usecs)) | |
1001 | return 0; | |
1002 | ||
1003 | /* we know urb->interval is 2^N uframes */ | |
1004 | uframe += period; | |
1005 | } while (uframe < mod); | |
1006 | return 1; | |
1007 | } | |
1008 | ||
1009 | static inline int | |
1010 | sitd_slot_ok ( | |
1011 | struct ehci_hcd *ehci, | |
1012 | u32 mod, | |
1013 | struct ehci_iso_stream *stream, | |
1014 | u32 uframe, | |
1015 | struct ehci_iso_sched *sched, | |
1016 | u32 period_uframes | |
1017 | ) | |
1018 | { | |
1019 | u32 mask, tmp; | |
1020 | u32 frame, uf; | |
1021 | ||
1022 | mask = stream->raw_mask << (uframe & 7); | |
1023 | ||
1024 | /* for IN, don't wrap CSPLIT into the next frame */ | |
1025 | if (mask & ~0xffff) | |
1026 | return 0; | |
1027 | ||
1028 | /* this multi-pass logic is simple, but performance may | |
1029 | * suffer when the schedule data isn't cached. | |
1030 | */ | |
1031 | ||
1032 | /* check bandwidth */ | |
1033 | uframe %= period_uframes; | |
1034 | do { | |
1035 | u32 max_used; | |
1036 | ||
1037 | frame = uframe >> 3; | |
1038 | uf = uframe & 7; | |
1039 | ||
1040 | /* tt must be idle for start(s), any gap, and csplit. | |
1041 | * assume scheduling slop leaves 10+% for control/bulk. | |
1042 | */ | |
1043 | if (!tt_no_collision (ehci, period_uframes << 3, | |
1044 | stream->udev, frame, mask)) | |
1045 | return 0; | |
1046 | ||
1047 | /* check starts (OUT uses more than one) */ | |
1048 | max_used = 100 - stream->usecs; | |
1049 | for (tmp = stream->raw_mask & 0xff; tmp; tmp >>= 1, uf++) { | |
1050 | if (periodic_usecs (ehci, frame, uf) > max_used) | |
1051 | return 0; | |
1052 | } | |
1053 | ||
1054 | /* for IN, check CSPLIT */ | |
1055 | if (stream->c_usecs) { | |
1056 | max_used = 100 - stream->c_usecs; | |
1057 | do { | |
1058 | tmp = 1 << uf; | |
1059 | tmp <<= 8; | |
1060 | if ((stream->raw_mask & tmp) == 0) | |
1061 | continue; | |
1062 | if (periodic_usecs (ehci, frame, uf) | |
1063 | > max_used) | |
1064 | return 0; | |
1065 | } while (++uf < 8); | |
1066 | } | |
1067 | ||
1068 | /* we know urb->interval is 2^N uframes */ | |
1069 | uframe += period_uframes; | |
1070 | } while (uframe < mod); | |
1071 | ||
1072 | stream->splits = cpu_to_le32(stream->raw_mask << (uframe & 7)); | |
1073 | return 1; | |
1074 | } | |
1075 | ||
1076 | /* | |
1077 | * This scheduler plans almost as far into the future as it has actual | |
1078 | * periodic schedule slots. (Affected by TUNE_FLS, which defaults to | |
1079 | * "as small as possible" to be cache-friendlier.) That limits the size | |
1080 | * transfers you can stream reliably; avoid more than 64 msec per urb. | |
1081 | * Also avoid queue depths of less than ehci's worst irq latency (affected | |
1082 | * by the per-urb URB_NO_INTERRUPT hint, the log2_irq_thresh module parameter, | |
1083 | * and other factors); or more than about 230 msec total (for portability, | |
1084 | * given EHCI_TUNE_FLS and the slop). Or, write a smarter scheduler! | |
1085 | */ | |
1086 | ||
1087 | #define SCHEDULE_SLOP 10 /* frames */ | |
1088 | ||
1089 | static int | |
1090 | iso_stream_schedule ( | |
1091 | struct ehci_hcd *ehci, | |
1092 | struct urb *urb, | |
1093 | struct ehci_iso_stream *stream | |
1094 | ) | |
1095 | { | |
1096 | u32 now, start, max, period; | |
1097 | int status; | |
1098 | unsigned mod = ehci->periodic_size << 3; | |
1099 | struct ehci_iso_sched *sched = urb->hcpriv; | |
1100 | ||
1101 | if (sched->span > (mod - 8 * SCHEDULE_SLOP)) { | |
1102 | ehci_dbg (ehci, "iso request %p too long\n", urb); | |
1103 | status = -EFBIG; | |
1104 | goto fail; | |
1105 | } | |
1106 | ||
1107 | if ((stream->depth + sched->span) > mod) { | |
1108 | ehci_dbg (ehci, "request %p would overflow (%d+%d>%d)\n", | |
1109 | urb, stream->depth, sched->span, mod); | |
1110 | status = -EFBIG; | |
1111 | goto fail; | |
1112 | } | |
1113 | ||
1114 | now = readl (&ehci->regs->frame_index) % mod; | |
1115 | ||
1116 | /* when's the last uframe this urb could start? */ | |
1117 | max = now + mod; | |
1118 | ||
1119 | /* typical case: reuse current schedule. stream is still active, | |
1120 | * and no gaps from host falling behind (irq delays etc) | |
1121 | */ | |
1122 | if (likely (!list_empty (&stream->td_list))) { | |
1123 | start = stream->next_uframe; | |
1124 | if (start < now) | |
1125 | start += mod; | |
1126 | if (likely ((start + sched->span) < max)) | |
1127 | goto ready; | |
1128 | /* else fell behind; someday, try to reschedule */ | |
1129 | status = -EL2NSYNC; | |
1130 | goto fail; | |
1131 | } | |
1132 | ||
1133 | /* need to schedule; when's the next (u)frame we could start? | |
1134 | * this is bigger than ehci->i_thresh allows; scheduling itself | |
1135 | * isn't free, the slop should handle reasonably slow cpus. it | |
1136 | * can also help high bandwidth if the dma and irq loads don't | |
1137 | * jump until after the queue is primed. | |
1138 | */ | |
1139 | start = SCHEDULE_SLOP * 8 + (now & ~0x07); | |
1140 | start %= mod; | |
1141 | stream->next_uframe = start; | |
1142 | ||
1143 | /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */ | |
1144 | ||
1145 | period = urb->interval; | |
1146 | if (!stream->highspeed) | |
1147 | period <<= 3; | |
1148 | ||
1149 | /* find a uframe slot with enough bandwidth */ | |
1150 | for (; start < (stream->next_uframe + period); start++) { | |
1151 | int enough_space; | |
1152 | ||
1153 | /* check schedule: enough space? */ | |
1154 | if (stream->highspeed) | |
1155 | enough_space = itd_slot_ok (ehci, mod, start, | |
1156 | stream->usecs, period); | |
1157 | else { | |
1158 | if ((start % 8) >= 6) | |
1159 | continue; | |
1160 | enough_space = sitd_slot_ok (ehci, mod, stream, | |
1161 | start, sched, period); | |
1162 | } | |
1163 | ||
1164 | /* schedule it here if there's enough bandwidth */ | |
1165 | if (enough_space) { | |
1166 | stream->next_uframe = start % mod; | |
1167 | goto ready; | |
1168 | } | |
1169 | } | |
1170 | ||
1171 | /* no room in the schedule */ | |
1172 | ehci_dbg (ehci, "iso %ssched full %p (now %d max %d)\n", | |
1173 | list_empty (&stream->td_list) ? "" : "re", | |
1174 | urb, now, max); | |
1175 | status = -ENOSPC; | |
1176 | ||
1177 | fail: | |
1178 | iso_sched_free (stream, sched); | |
1179 | urb->hcpriv = NULL; | |
1180 | return status; | |
1181 | ||
1182 | ready: | |
1183 | /* report high speed start in uframes; full speed, in frames */ | |
1184 | urb->start_frame = stream->next_uframe; | |
1185 | if (!stream->highspeed) | |
1186 | urb->start_frame >>= 3; | |
1187 | return 0; | |
1188 | } | |
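Editor's aside: a small numeric illustration of the wraparound handling in the "reuse current schedule" path above. The uframe counter wraps at mod = periodic_size * 8; lifting start by mod when it is numerically behind now turns "is there still room before the wrap?" into a plain comparison. The 1024-frame list size is just an example value:

#include <stdio.h>

int main(void)
{
        unsigned mod = 1024 * 8;        /* e.g. a 1024-frame periodic list */
        unsigned now = 8100;            /* current uframe, near the wrap */
        unsigned start = 40;            /* stream->next_uframe, already wrapped */
        unsigned span = 256;            /* uframes this urb needs */

        if (start < now)
                start += mod;           /* 40 -> 8232, i.e. just after now */

        /* same shape as "(start + sched->span) < max" with max = now + mod */
        printf("fits: %s\n", (start + span) < (now + mod) ? "yes" : "no");
        return 0;
}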
1189 | ||
1190 | /*-------------------------------------------------------------------------*/ | |
1191 | ||
1192 | static inline void | |
1193 | itd_init (struct ehci_iso_stream *stream, struct ehci_itd *itd) | |
1194 | { | |
1195 | int i; | |
1196 | ||
77078570 | 1197 | /* it's been recently zeroed */ |
1da177e4 LT |
1198 | itd->hw_next = EHCI_LIST_END; |
1199 | itd->hw_bufp [0] = stream->buf0; | |
1200 | itd->hw_bufp [1] = stream->buf1; | |
1201 | itd->hw_bufp [2] = stream->buf2; | |
1202 | ||
1203 | for (i = 0; i < 8; i++) | |
1204 | itd->index[i] = -1; | |
1205 | ||
1206 | /* All other fields are filled when scheduling */ | |
1207 | } | |
1208 | ||
1209 | static inline void | |
1210 | itd_patch ( | |
1211 | struct ehci_itd *itd, | |
1212 | struct ehci_iso_sched *iso_sched, | |
1213 | unsigned index, | |
77078570 | 1214 | u16 uframe |
1da177e4 LT |
1215 | ) |
1216 | { | |
1217 | struct ehci_iso_packet *uf = &iso_sched->packet [index]; | |
1218 | unsigned pg = itd->pg; | |
1219 | ||
1220 | // BUG_ON (pg == 6 && uf->cross); | |
1221 | ||
1222 | uframe &= 0x07; | |
1223 | itd->index [uframe] = index; | |
1224 | ||
1225 | itd->hw_transaction [uframe] = uf->transaction; | |
1226 | itd->hw_transaction [uframe] |= cpu_to_le32 (pg << 12); | |
1227 | itd->hw_bufp [pg] |= cpu_to_le32 (uf->bufp & ~(u32)0); | |
1228 | itd->hw_bufp_hi [pg] |= cpu_to_le32 ((u32)(uf->bufp >> 32)); | |
1229 | ||
1230 | /* iso_frame_desc[].offset must be strictly increasing */ | |
77078570 | 1231 | if (unlikely (uf->cross)) { |
1da177e4 LT |
1232 | u64 bufp = uf->bufp + 4096; |
1233 | itd->pg = ++pg; | |
1234 | itd->hw_bufp [pg] |= cpu_to_le32 (bufp & ~(u32)0); | |
1235 | itd->hw_bufp_hi [pg] |= cpu_to_le32 ((u32)(bufp >> 32)); | |
1236 | } | |
1237 | } | |
1238 | ||
1239 | static inline void | |
1240 | itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd) | |
1241 | { | |
1242 | /* always prepend ITD/SITD ... only QH tree is order-sensitive */ | |
1243 | itd->itd_next = ehci->pshadow [frame]; | |
1244 | itd->hw_next = ehci->periodic [frame]; | |
1245 | ehci->pshadow [frame].itd = itd; | |
1246 | itd->frame = frame; | |
1247 | wmb (); | |
1248 | ehci->periodic [frame] = cpu_to_le32 (itd->itd_dma) | Q_TYPE_ITD; | |
1249 | } | |
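Editor's aside: the same "prepend, then publish" pattern in miniature (editor's addition; the struct and helper below are simplified stand-ins, not driver types). The new descriptor's own links are filled in first; only after the write barrier does the frame-list head change, so the controller never follows a pointer into a half-built node:

#include <stdio.h>

struct node {
        struct node     *next;
        int             payload;
};

static void publish(struct node **head, struct node *n, int payload)
{
        n->payload = payload;
        n->next = *head;        /* 1: node points at the current head */
        /* wmb() goes here in the driver ... */
        *head = n;              /* 2: ... before the node becomes visible */
}

int main(void)
{
        struct node *head = NULL;
        struct node a, b;

        publish(&head, &a, 1);
        publish(&head, &b, 2);  /* newest first, like ITDs in a frame slot */
        printf("%d %d\n", head->payload, head->next->payload);  /* 2 1 */
        return 0;
}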
1250 | ||
1251 | /* fit urb's itds into the selected schedule slot; activate as needed */ | |
1252 | static int | |
1253 | itd_link_urb ( | |
1254 | struct ehci_hcd *ehci, | |
1255 | struct urb *urb, | |
1256 | unsigned mod, | |
1257 | struct ehci_iso_stream *stream | |
1258 | ) | |
1259 | { | |
77078570 | 1260 | int packet; |
1da177e4 LT |
1261 | unsigned next_uframe, uframe, frame; |
1262 | struct ehci_iso_sched *iso_sched = urb->hcpriv; | |
1263 | struct ehci_itd *itd; | |
1264 | ||
1265 | next_uframe = stream->next_uframe % mod; | |
1266 | ||
1267 | if (unlikely (list_empty(&stream->td_list))) { | |
1268 | ehci_to_hcd(ehci)->self.bandwidth_allocated | |
1269 | += stream->bandwidth; | |
1270 | ehci_vdbg (ehci, | |
1271 | "schedule devp %s ep%d%s-iso period %d start %d.%d\n", | |
1272 | urb->dev->devpath, stream->bEndpointAddress & 0x0f, | |
1273 | (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out", | |
1274 | urb->interval, | |
1275 | next_uframe >> 3, next_uframe & 0x7); | |
1276 | stream->start = jiffies; | |
1277 | } | |
1278 | ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++; | |
1279 | ||
1280 | /* fill iTDs uframe by uframe */ | |
1281 | for (packet = 0, itd = NULL; packet < urb->number_of_packets; ) { | |
1282 | if (itd == NULL) { | |
1283 | /* ASSERT: we have all necessary itds */ | |
1284 | // BUG_ON (list_empty (&iso_sched->td_list)); | |
1285 | ||
1286 | /* ASSERT: no itds for this endpoint in this uframe */ | |
1287 | ||
1288 | itd = list_entry (iso_sched->td_list.next, | |
1289 | struct ehci_itd, itd_list); | |
1290 | list_move_tail (&itd->itd_list, &stream->td_list); | |
1291 | itd->stream = iso_stream_get (stream); | |
1292 | itd->urb = usb_get_urb (urb); | |
1da177e4 LT |
1293 | itd_init (stream, itd); |
1294 | } | |
1295 | ||
1296 | uframe = next_uframe & 0x07; | |
1297 | frame = next_uframe >> 3; | |
1298 | ||
1299 | itd->usecs [uframe] = stream->usecs; | |
77078570 | 1300 | itd_patch (itd, iso_sched, packet, uframe); |
1da177e4 LT |
1301 | |
1302 | next_uframe += stream->interval; | |
1303 | stream->depth += stream->interval; | |
1304 | next_uframe %= mod; | |
1305 | packet++; | |
1306 | ||
1307 | /* link completed itds into the schedule */ | |
1308 | if (((next_uframe >> 3) != frame) | |
1309 | || packet == urb->number_of_packets) { | |
1310 | itd_link (ehci, frame % ehci->periodic_size, itd); | |
1311 | itd = NULL; | |
1312 | } | |
1313 | } | |
1314 | stream->next_uframe = next_uframe; | |
1315 | ||
1316 | /* don't need that schedule data any more */ | |
1317 | iso_sched_free (stream, iso_sched); | |
1318 | urb->hcpriv = NULL; | |
1319 | ||
1320 | timer_action (ehci, TIMER_IO_WATCHDOG); | |
1321 | if (unlikely (!ehci->periodic_sched++)) | |
1322 | return enable_periodic (ehci); | |
1323 | return 0; | |
1324 | } | |
1325 | ||
1326 | #define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR) | |
1327 | ||
1328 | static unsigned | |
1329 | itd_complete ( | |
1330 | struct ehci_hcd *ehci, | |
1331 | struct ehci_itd *itd, | |
1332 | struct pt_regs *regs | |
1333 | ) { | |
1334 | struct urb *urb = itd->urb; | |
1335 | struct usb_iso_packet_descriptor *desc; | |
1336 | u32 t; | |
1337 | unsigned uframe; | |
1338 | int urb_index = -1; | |
1339 | struct ehci_iso_stream *stream = itd->stream; | |
1340 | struct usb_device *dev; | |
1341 | ||
1342 | /* for each uframe with a packet */ | |
1343 | for (uframe = 0; uframe < 8; uframe++) { | |
1344 | if (likely (itd->index[uframe] == -1)) | |
1345 | continue; | |
1346 | urb_index = itd->index[uframe]; | |
1347 | desc = &urb->iso_frame_desc [urb_index]; | |
1348 | ||
1349 | t = le32_to_cpup (&itd->hw_transaction [uframe]); | |
1350 | itd->hw_transaction [uframe] = 0; | |
1351 | stream->depth -= stream->interval; | |
1352 | ||
1353 | /* report transfer status */ | |
1354 | if (unlikely (t & ISO_ERRS)) { | |
1355 | urb->error_count++; | |
1356 | if (t & EHCI_ISOC_BUF_ERR) | |
1357 | desc->status = usb_pipein (urb->pipe) | |
1358 | ? -ENOSR /* hc couldn't read */ | |
1359 | : -ECOMM; /* hc couldn't write */ | |
1360 | else if (t & EHCI_ISOC_BABBLE) | |
1361 | desc->status = -EOVERFLOW; | |
1362 | else /* (t & EHCI_ISOC_XACTERR) */ | |
1363 | desc->status = -EPROTO; | |
1364 | ||
1365 | /* HC need not update length with this error */ | |
1366 | if (!(t & EHCI_ISOC_BABBLE)) | |
1367 | desc->actual_length = EHCI_ITD_LENGTH (t); | |
1368 | } else if (likely ((t & EHCI_ISOC_ACTIVE) == 0)) { | |
1369 | desc->status = 0; | |
1370 | desc->actual_length = EHCI_ITD_LENGTH (t); | |
1371 | } | |
1372 | } | |
1373 | ||
1374 | usb_put_urb (urb); | |
1375 | itd->urb = NULL; | |
1376 | itd->stream = NULL; | |
1377 | list_move (&itd->itd_list, &stream->free_list); | |
1378 | iso_stream_put (ehci, stream); | |
1379 | ||
1380 | /* handle completion now? */ | |
1381 | if (likely ((urb_index + 1) != urb->number_of_packets)) | |
1382 | return 0; | |
1383 | ||
1384 | /* ASSERT: it's really the last itd for this urb | |
1385 | list_for_each_entry (itd, &stream->td_list, itd_list) | |
1386 | BUG_ON (itd->urb == urb); | |
1387 | */ | |
1388 | ||
1389 | /* give urb back to the driver ... can be out-of-order */ | |
1390 | dev = usb_get_dev (urb->dev); | |
1391 | ehci_urb_done (ehci, urb, regs); | |
1392 | urb = NULL; | |
1393 | ||
1394 | /* defer stopping schedule; completion can submit */ | |
1395 | ehci->periodic_sched--; | |
1396 | if (unlikely (!ehci->periodic_sched)) | |
1397 | (void) disable_periodic (ehci); | |
1398 | ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; | |
1399 | ||
1400 | if (unlikely (list_empty (&stream->td_list))) { | |
1401 | ehci_to_hcd(ehci)->self.bandwidth_allocated | |
1402 | -= stream->bandwidth; | |
1403 | ehci_vdbg (ehci, | |
1404 | "deschedule devp %s ep%d%s-iso\n", | |
1405 | dev->devpath, stream->bEndpointAddress & 0x0f, | |
1406 | (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out"); | |
1407 | } | |
1408 | iso_stream_put (ehci, stream); | |
1409 | usb_put_dev (dev); | |
1410 | ||
1411 | return 1; | |
1412 | } | |
1413 | ||
1414 | /*-------------------------------------------------------------------------*/ | |
1415 | ||
5db539e4 OK |
1416 | static int itd_submit (struct ehci_hcd *ehci, struct urb *urb, |
1417 | unsigned mem_flags) | |
1da177e4 LT |
1418 | { |
1419 | int status = -EINVAL; | |
1420 | unsigned long flags; | |
1421 | struct ehci_iso_stream *stream; | |
1422 | ||
1423 | /* Get iso_stream head */ | |
1424 | stream = iso_stream_find (ehci, urb); | |
1425 | if (unlikely (stream == NULL)) { | |
1426 | ehci_dbg (ehci, "can't get iso stream\n"); | |
1427 | return -ENOMEM; | |
1428 | } | |
1429 | if (unlikely (urb->interval != stream->interval)) { | |
1430 | ehci_dbg (ehci, "can't change iso interval %d --> %d\n", | |
1431 | stream->interval, urb->interval); | |
1432 | goto done; | |
1433 | } | |
1434 | ||
1435 | #ifdef EHCI_URB_TRACE | |
1436 | ehci_dbg (ehci, | |
1437 | "%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n", | |
1438 | __FUNCTION__, urb->dev->devpath, urb, | |
1439 | usb_pipeendpoint (urb->pipe), | |
1440 | usb_pipein (urb->pipe) ? "in" : "out", | |
1441 | urb->transfer_buffer_length, | |
1442 | urb->number_of_packets, urb->interval, | |
1443 | stream); | |
1444 | #endif | |
1445 | ||
1446 | /* allocate ITDs w/o locking anything */ | |
1447 | status = itd_urb_transaction (stream, ehci, urb, mem_flags); | |
1448 | if (unlikely (status < 0)) { | |
1449 | ehci_dbg (ehci, "can't init itds\n"); | |
1450 | goto done; | |
1451 | } | |
1452 | ||
1453 | /* schedule ... need to lock */ | |
1454 | spin_lock_irqsave (&ehci->lock, flags); | |
1455 | status = iso_stream_schedule (ehci, urb, stream); | |
1456 | if (likely (status == 0)) | |
1457 | itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream); | |
1458 | spin_unlock_irqrestore (&ehci->lock, flags); | |
1459 | ||
1460 | done: | |
1461 | if (unlikely (status < 0)) | |
1462 | iso_stream_put (ehci, stream); | |
1463 | return status; | |
1464 | } | |
1465 | ||
1466 | #ifdef CONFIG_USB_EHCI_SPLIT_ISO | |
1467 | ||
1468 | /*-------------------------------------------------------------------------*/ | |
1469 | ||
1470 | /* | |
1471 | * "Split ISO TDs" ... used for USB 1.1 devices going through the | |
1472 | * TTs in USB 2.0 hubs. These need microframe scheduling. | |
1473 | */ | |
1474 | ||
1475 | static inline void | |
1476 | sitd_sched_init ( | |
1477 | struct ehci_iso_sched *iso_sched, | |
1478 | struct ehci_iso_stream *stream, | |
1479 | struct urb *urb | |
1480 | ) | |
1481 | { | |
1482 | unsigned i; | |
1483 | dma_addr_t dma = urb->transfer_dma; | |
1484 | ||
1485 | /* how many frames are needed for these transfers */ | |
1486 | iso_sched->span = urb->number_of_packets * stream->interval; | |
1487 | ||
1488 | /* figure out per-frame sitd fields that we'll need later | |
1489 | * when we fit new sitds into the schedule. | |
1490 | */ | |
1491 | for (i = 0; i < urb->number_of_packets; i++) { | |
1492 | struct ehci_iso_packet *packet = &iso_sched->packet [i]; | |
1493 | unsigned length; | |
1494 | dma_addr_t buf; | |
1495 | u32 trans; | |
1496 | ||
1497 | length = urb->iso_frame_desc [i].length & 0x03ff; | |
1498 | buf = dma + urb->iso_frame_desc [i].offset; | |
1499 | ||
1500 | trans = SITD_STS_ACTIVE; | |
1501 | if (((i + 1) == urb->number_of_packets) | |
1502 | && !(urb->transfer_flags & URB_NO_INTERRUPT)) | |
1503 | trans |= SITD_IOC; | |
1504 | trans |= length << 16; | |
1505 | packet->transaction = cpu_to_le32 (trans); | |
1506 | ||
1507 | /* might need to cross a buffer page within a td */ | |
1508 | packet->bufp = buf; | |
1509 | packet->buf1 = (buf + length) & ~0x0fff; | |
1510 | if (packet->buf1 != (buf & ~(u64)0x0fff)) | |
1511 | packet->cross = 1; | |
1512 | ||
1513 | /* OUT uses multiple start-splits */ | |
1514 | if (stream->bEndpointAddress & USB_DIR_IN) | |
1515 | continue; | |
1516 | length = (length + 187) / 188; | |
1517 | if (length > 1) /* BEGIN vs ALL */ | |
1518 | length |= 1 << 3; | |
1519 | packet->buf1 |= length; | |
1520 | } | |
1521 | } | |
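Editor's aside: a worked example of the OUT start-split bookkeeping at the end of sitd_sched_init(). Each start-split carries at most 188 bytes, so the count is (length + 187) / 188, and when more than one is needed bit 3 switches the transaction-position field from ALL to BEGIN. The helper name is hypothetical:

#include <stdio.h>

static unsigned out_tcount_tp(unsigned length)
{
        unsigned v = (length + 187) / 188;      /* number of start-splits */

        if (v > 1)                              /* BEGIN vs ALL */
                v |= 1 << 3;
        return v;
}

int main(void)
{
        printf("100 bytes -> 0x%02x\n", out_tcount_tp(100));    /* 0x01 */
        printf("400 bytes -> 0x%02x\n", out_tcount_tp(400));    /* 0x0b */
        return 0;
}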
1522 | ||
1523 | static int | |
1524 | sitd_urb_transaction ( | |
1525 | struct ehci_iso_stream *stream, | |
1526 | struct ehci_hcd *ehci, | |
1527 | struct urb *urb, | |
5db539e4 | 1528 | unsigned mem_flags |
1da177e4 LT |
1529 | ) |
1530 | { | |
1531 | struct ehci_sitd *sitd; | |
1532 | dma_addr_t sitd_dma; | |
1533 | int i; | |
1534 | struct ehci_iso_sched *iso_sched; | |
1535 | unsigned long flags; | |
1536 | ||
1537 | iso_sched = iso_sched_alloc (urb->number_of_packets, mem_flags); | |
1538 | if (iso_sched == NULL) | |
1539 | return -ENOMEM; | |
1540 | ||
1541 | sitd_sched_init (iso_sched, stream, urb); | |
1542 | ||
1543 | /* allocate/init sITDs */ | |
1544 | spin_lock_irqsave (&ehci->lock, flags); | |
1545 | for (i = 0; i < urb->number_of_packets; i++) { | |
1546 | ||
1547 | /* NOTE: for now, we don't try to handle wraparound cases | |
1548 | * for IN (using sitd->hw_backpointer, like a FSTN), which | |
1549 | * means we never need two sitds for full speed packets. | |
1550 | */ | |
1551 | ||
1552 | /* free_list.next might be cache-hot ... but maybe | |
1553 | * the HC caches it too. avoid that issue for now. | |
1554 | */ | |
1555 | ||
1556 | /* prefer previously-allocated sitds */ | |
1557 | if (!list_empty(&stream->free_list)) { | |
1558 | sitd = list_entry (stream->free_list.prev, | |
1559 | struct ehci_sitd, sitd_list); | |
1560 | list_del (&sitd->sitd_list); | |
1561 | sitd_dma = sitd->sitd_dma; | |
1562 | } else | |
1563 | sitd = NULL; | |
1564 | ||
1565 | if (!sitd) { | |
1566 | spin_unlock_irqrestore (&ehci->lock, flags); | |
1567 | sitd = dma_pool_alloc (ehci->sitd_pool, mem_flags, | |
1568 | &sitd_dma); | |
1569 | spin_lock_irqsave (&ehci->lock, flags); | |
1570 | } | |
1571 | ||
1572 | if (!sitd) { | |
1573 | iso_sched_free (stream, iso_sched); | |
1574 | spin_unlock_irqrestore (&ehci->lock, flags); | |
1575 | return -ENOMEM; | |
1576 | } | |
1577 | memset (sitd, 0, sizeof *sitd); | |
1578 | sitd->sitd_dma = sitd_dma; | |
1579 | list_add (&sitd->sitd_list, &iso_sched->td_list); | |
1580 | } | |
1581 | ||
1582 | /* temporarily store schedule info in hcpriv */ | |
1583 | urb->hcpriv = iso_sched; | |
1584 | urb->error_count = 0; | |
1585 | ||
1586 | spin_unlock_irqrestore (&ehci->lock, flags); | |
1587 | return 0; | |
1588 | } | |
1589 | ||
1590 | /*-------------------------------------------------------------------------*/ | |
1591 | ||
1592 | static inline void | |
1593 | sitd_patch ( | |
1594 | struct ehci_iso_stream *stream, | |
1595 | struct ehci_sitd *sitd, | |
1596 | struct ehci_iso_sched *iso_sched, | |
1597 | unsigned index | |
1598 | ) | |
1599 | { | |
1600 | struct ehci_iso_packet *uf = &iso_sched->packet [index]; | |
1601 | u64 bufp = uf->bufp; | |
1602 | ||
1603 | sitd->hw_next = EHCI_LIST_END; | |
1604 | sitd->hw_fullspeed_ep = stream->address; | |
1605 | sitd->hw_uframe = stream->splits; | |
1606 | sitd->hw_results = uf->transaction; | |
1607 | sitd->hw_backpointer = EHCI_LIST_END; | |
1608 | ||
1609 | bufp = uf->bufp; | |
1610 | sitd->hw_buf [0] = cpu_to_le32 (bufp); | |
1611 | sitd->hw_buf_hi [0] = cpu_to_le32 (bufp >> 32); | |
1612 | ||
1613 | sitd->hw_buf [1] = cpu_to_le32 (uf->buf1); | |
1614 | if (uf->cross) | |
1615 | bufp += 4096; | |
1616 | sitd->hw_buf_hi [1] = cpu_to_le32 (bufp >> 32); | |
1617 | sitd->index = index; | |
1618 | } | |
1619 | ||
1620 | static inline void | |
1621 | sitd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd) | |
1622 | { | |
1623 | /* note: sitd ordering could matter (CSPLIT then SSPLIT) */ | |
1624 | sitd->sitd_next = ehci->pshadow [frame]; | |
1625 | sitd->hw_next = ehci->periodic [frame]; | |
1626 | ehci->pshadow [frame].sitd = sitd; | |
1627 | sitd->frame = frame; | |
1628 | wmb (); | |
1629 | ehci->periodic [frame] = cpu_to_le32 (sitd->sitd_dma) | Q_TYPE_SITD; | |
1630 | } | |
1631 | ||
1632 | /* fit urb's sitds into the selected schedule slot; activate as needed */ | |
1633 | static int | |
1634 | sitd_link_urb ( | |
1635 | struct ehci_hcd *ehci, | |
1636 | struct urb *urb, | |
1637 | unsigned mod, | |
1638 | struct ehci_iso_stream *stream | |
1639 | ) | |
1640 | { | |
1641 | int packet; | |
1642 | unsigned next_uframe; | |
1643 | struct ehci_iso_sched *sched = urb->hcpriv; | |
1644 | struct ehci_sitd *sitd; | |
1645 | ||
1646 | next_uframe = stream->next_uframe; | |
1647 | ||
1648 | if (list_empty(&stream->td_list)) { | |
1649 | /* usbfs ignores TT bandwidth */ | |
1650 | ehci_to_hcd(ehci)->self.bandwidth_allocated | |
1651 | += stream->bandwidth; | |
1652 | ehci_vdbg (ehci, | |
1653 | "sched devp %s ep%d%s-iso [%d] %dms/%04x\n", | |
1654 | urb->dev->devpath, stream->bEndpointAddress & 0x0f, | |
1655 | (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out", | |
1656 | (next_uframe >> 3) % ehci->periodic_size, | |
1657 | stream->interval, le32_to_cpu (stream->splits)); | |
1658 | stream->start = jiffies; | |
1659 | } | |
1660 | ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++; | |
1661 | ||
1662 | /* fill sITDs frame by frame */ | |
1663 | for (packet = 0, sitd = NULL; | |
1664 | packet < urb->number_of_packets; | |
1665 | packet++) { | |
1666 | ||
1667 | /* ASSERT: we have all necessary sitds */ | |
1668 | BUG_ON (list_empty (&sched->td_list)); | |
1669 | ||
1670 | /* ASSERT: no itds for this endpoint in this frame */ | |
1671 | ||
1672 | sitd = list_entry (sched->td_list.next, | |
1673 | struct ehci_sitd, sitd_list); | |
1674 | list_move_tail (&sitd->sitd_list, &stream->td_list); | |
1675 | sitd->stream = iso_stream_get (stream); | |
1676 | sitd->urb = usb_get_urb (urb); | |
1677 | ||
1678 | sitd_patch (stream, sitd, sched, packet); | |
1679 | sitd_link (ehci, (next_uframe >> 3) % ehci->periodic_size, | |
1680 | sitd); | |
1681 | ||
1682 | next_uframe += stream->interval << 3; | |
1683 | stream->depth += stream->interval << 3; | |
1684 | } | |
1685 | stream->next_uframe = next_uframe % mod; | |
1686 | ||
1687 | /* don't need that schedule data any more */ | |
1688 | iso_sched_free (stream, sched); | |
1689 | urb->hcpriv = NULL; | |
1690 | ||
1691 | timer_action (ehci, TIMER_IO_WATCHDOG); | |
1692 | if (!ehci->periodic_sched++) | |
1693 | return enable_periodic (ehci); | |
1694 | return 0; | |
1695 | } | |
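/*
 * Arithmetic sketch for the uframe bookkeeping above, using the default
 * schedule of 1024 frames (mod = 1024 << 3 = 8192 uframes) and invented
 * numbers: a stream with interval 8 frames advances next_uframe by
 * 8 << 3 = 64 uframes per packet.  Starting at next_uframe = 8160:
 *
 *	packet 0  -> frame (8160 >> 3) % 1024 = 1020
 *	packet 1  -> frame (8224 >> 3) % 1024 = 4	(wrapped)
 *	...
 *	after 10 packets, stream->next_uframe = (8160 + 640) % 8192 = 608
 *
 * The "% mod" at the end is what keeps the cursor inside the schedule
 * after wrapping.
 */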
1696 | ||
1697 | /*-------------------------------------------------------------------------*/ | |
1698 | ||
1699 | #define SITD_ERRS (SITD_STS_ERR | SITD_STS_DBE | SITD_STS_BABBLE \ | |
1700 | | SITD_STS_XACT | SITD_STS_MMF) | |
1701 | ||
1702 | static unsigned | |
1703 | sitd_complete ( | |
1704 | struct ehci_hcd *ehci, | |
1705 | struct ehci_sitd *sitd, | |
1706 | struct pt_regs *regs | |
1707 | ) { | |
1708 | struct urb *urb = sitd->urb; | |
1709 | struct usb_iso_packet_descriptor *desc; | |
1710 | u32 t; | |
1711 | int urb_index = -1; | |
1712 | struct ehci_iso_stream *stream = sitd->stream; | |
1713 | struct usb_device *dev; | |
1714 | ||
1715 | urb_index = sitd->index; | |
1716 | desc = &urb->iso_frame_desc [urb_index]; | |
1717 | t = le32_to_cpup (&sitd->hw_results); | |
1718 | ||
1719 | /* report transfer status */ | |
1720 | if (t & SITD_ERRS) { | |
1721 | urb->error_count++; | |
1722 | if (t & SITD_STS_DBE) | |
1723 | desc->status = usb_pipein (urb->pipe) | |
1724 | ? -ENOSR /* hc couldn't read */ | |
1725 | : -ECOMM; /* hc couldn't write */ | |
1726 | else if (t & SITD_STS_BABBLE) | |
1727 | desc->status = -EOVERFLOW; | |
1728 | else /* XACT, MMF, etc */ | |
1729 | desc->status = -EPROTO; | |
1730 | } else { | |
1731 | desc->status = 0; | |
1732 | desc->actual_length = desc->length - SITD_LENGTH (t); | |
1733 | } | |
1734 | ||
1735 | usb_put_urb (urb); | |
1736 | sitd->urb = NULL; | |
1737 | sitd->stream = NULL; | |
1738 | list_move (&sitd->sitd_list, &stream->free_list); | |
1739 | stream->depth -= stream->interval << 3; | |
1740 | iso_stream_put (ehci, stream); | |
1741 | ||
1742 | /* handle completion now? */ | |
1743 | if ((urb_index + 1) != urb->number_of_packets) | |
1744 | return 0; | |
1745 | ||
1746 | /* ASSERT: it's really the last sitd for this urb | |
1747 | list_for_each_entry (sitd, &stream->td_list, sitd_list) | |
1748 | BUG_ON (sitd->urb == urb); | |
1749 | */ | |
1750 | ||
1751 | /* give urb back to the driver */ | |
1752 | dev = usb_get_dev (urb->dev); | |
1753 | ehci_urb_done (ehci, urb, regs); | |
1754 | urb = NULL; | |
1755 | ||
1756 | /* defer stopping schedule; completion can submit */ | |
1757 | ehci->periodic_sched--; | |
1758 | if (!ehci->periodic_sched) | |
1759 | (void) disable_periodic (ehci); | |
1760 | ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs--; | |
1761 | ||
1762 | if (list_empty (&stream->td_list)) { | |
1763 | ehci_to_hcd(ehci)->self.bandwidth_allocated | |
1764 | -= stream->bandwidth; | |
1765 | ehci_vdbg (ehci, | |
1766 | "deschedule devp %s ep%d%s-iso\n", | |
1767 | dev->devpath, stream->bEndpointAddress & 0x0f, | |
1768 | (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out"); | |
1769 | } | |
1770 | iso_stream_put (ehci, stream); | |
1771 | usb_put_dev (dev); | |
1772 | ||
1773 | return 1; | |
1774 | } | |
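/*
 * Length bookkeeping above, spelled out: the hw_results length field is
 * written as "total bytes to transfer" and counts down as the HC moves
 * data, so on completion SITD_LENGTH(t) is the residue.  With invented
 * numbers, an IN packet queued for 188 bytes from which the device
 * delivered only 148 (short but error-free) leaves a residue of 40, so
 *
 *	desc->actual_length = desc->length - SITD_LENGTH(t)
 *	                    = 188 - 40 = 148
 *
 * Only the success path does this; on errors the code above leaves
 * actual_length untouched and reports a status code instead.
 */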
1775 | ||
1776 | ||
5db539e4 OK |
1777 | static int sitd_submit (struct ehci_hcd *ehci, struct urb *urb, |
1778 | unsigned mem_flags) | |
1da177e4 LT |
1779 | { |
1780 | int status = -EINVAL; | |
1781 | unsigned long flags; | |
1782 | struct ehci_iso_stream *stream; | |
1783 | ||
1784 | /* Get iso_stream head */ | |
1785 | stream = iso_stream_find (ehci, urb); | |
1786 | if (stream == NULL) { | |
1787 | ehci_dbg (ehci, "can't get iso stream\n"); | |
1788 | return -ENOMEM; | |
1789 | } | |
1790 | if (urb->interval != stream->interval) { | |
1791 | ehci_dbg (ehci, "can't change iso interval %d --> %d\n", | |
1792 | stream->interval, urb->interval); | |
1793 | goto done; | |
1794 | } | |
1795 | ||
1796 | #ifdef EHCI_URB_TRACE | |
1797 | ehci_dbg (ehci, | |
1798 | "submit %p dev%s ep%d%s-iso len %d\n", | |
1799 | urb, urb->dev->devpath, | |
1800 | usb_pipeendpoint (urb->pipe), | |
1801 | usb_pipein (urb->pipe) ? "in" : "out", | |
1802 | urb->transfer_buffer_length); | |
1803 | #endif | |
1804 | ||
1805 | /* allocate SITDs */ | |
1806 | status = sitd_urb_transaction (stream, ehci, urb, mem_flags); | |
1807 | if (status < 0) { | |
1808 | ehci_dbg (ehci, "can't init sitds\n"); | |
1809 | goto done; | |
1810 | } | |
1811 | ||
1812 | /* schedule ... need to lock */ | |
1813 | spin_lock_irqsave (&ehci->lock, flags); | |
1814 | status = iso_stream_schedule (ehci, urb, stream); | |
1815 | if (status == 0) | |
1816 | sitd_link_urb (ehci, urb, ehci->periodic_size << 3, stream); | |
1817 | spin_unlock_irqrestore (&ehci->lock, flags); | |
1818 | ||
1819 | done: | |
1820 | if (status < 0) | |
1821 | iso_stream_put (ehci, stream); | |
1822 | return status; | |
1823 | } | |
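/*
 * Flow summary for sitd_submit(): sitd allocation (which may sleep) runs
 * before the scheduling section takes ehci->lock; only the schedule
 * search and the actual linking happen under the lock.  Note also the
 * interval check -- a hypothetical resubmission with a changed interval,
 * for instance
 *
 *	urb->interval = 16;	// stream was created with interval 8
 *
 * falls through to "done" with status still -EINVAL rather than being
 * rescheduled, because the stream's splits and bandwidth were set up
 * for the original interval.
 */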
1824 | ||
1825 | #else | |
1826 | ||
1827 | static inline int | |
5db539e4 OK |
1828 | sitd_submit (struct ehci_hcd *ehci, struct urb *urb, |
1829 | unsigned mem_flags) | |
1da177e4 LT |
1830 | { |
1831 | ehci_dbg (ehci, "split iso support is disabled\n"); | |
1832 | return -ENOSYS; | |
1833 | } | |
1834 | ||
1835 | static inline unsigned | |
1836 | sitd_complete ( | |
1837 | struct ehci_hcd *ehci, | |
1838 | struct ehci_sitd *sitd, | |
1839 | struct pt_regs *regs | |
1840 | ) { | |
1841 | ehci_err (ehci, "sitd_complete %p?\n", sitd); | |
1842 | return 0; | |
1843 | } | |
1844 | ||
1845 | #endif /* USB_EHCI_SPLIT_ISO */ | |
1846 | ||
1847 | /*-------------------------------------------------------------------------*/ | |
1848 | ||
1849 | static void | |
1850 | scan_periodic (struct ehci_hcd *ehci, struct pt_regs *regs) | |
1851 | { | |
1852 | unsigned frame, clock, now_uframe, mod; | |
1853 | unsigned modified; | |
1854 | ||
1855 | mod = ehci->periodic_size << 3; | |
1856 | ||
1857 | /* | |
1858 | * When running, scan from last scan point up to "now" | |
1859 | * else clean up by scanning everything that's left. | |
1860 | * Touches as few pages as possible: cache-friendly. | |
1861 | */ | |
1862 | now_uframe = ehci->next_uframe; | |
1863 | if (HC_IS_RUNNING (ehci_to_hcd(ehci)->state)) | |
1864 | clock = readl (&ehci->regs->frame_index); | |
1865 | else | |
1866 | clock = now_uframe + mod - 1; | |
1867 | clock %= mod; | |
1868 | ||
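	/*
	 * Sketch of the numbers involved, assuming the default schedule
	 * size of 1024 frames: mod = 1024 << 3 = 8192 uframes.  FRINDEX
	 * counts microframes (its low three bits are the uframe within
	 * the current frame), so "clock" is simply folded into schedule
	 * range.  When the HC is halted, clock = now_uframe + mod - 1
	 * makes the loop below sweep the whole schedule once before it
	 * catches up with itself and stops.
	 */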
1869 | for (;;) { | |
1870 | union ehci_shadow q, *q_p; | |
1871 | __le32 type, *hw_p; | |
1872 | unsigned uframes; | |
1873 | ||
1874 | /* don't scan past the live uframe */ | |
1875 | frame = now_uframe >> 3; | |
1876 | if (frame == (clock >> 3)) | |
1877 | uframes = now_uframe & 0x07; | |
1878 | else { | |
1879 | /* safe to scan the whole frame at once */ | |
1880 | now_uframe |= 0x07; | |
1881 | uframes = 8; | |
1882 | } | |
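		/*
		 * Example of this guard (numbers invented): if now_uframe
		 * is 0x1fb (frame 63, uframe 3) and the HC's clock is
		 * still somewhere in frame 63, then uframes = 3 and the
		 * ITD case below leaves alone any itd that is still
		 * active at uframes 3..7 -- the HC may not have reached
		 * those transactions yet.  Frames strictly behind the
		 * clock are scanned in full (uframes = 8).
		 */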
1883 | ||
1884 | restart: | |
1885 | /* scan each element in frame's queue for completions */ | |
1886 | q_p = &ehci->pshadow [frame]; | |
1887 | hw_p = &ehci->periodic [frame]; | |
1888 | q.ptr = q_p->ptr; | |
1889 | type = Q_NEXT_TYPE (*hw_p); | |
1890 | modified = 0; | |
1891 | ||
1892 | while (q.ptr != NULL) { | |
1893 | unsigned uf; | |
1894 | union ehci_shadow temp; | |
1895 | int live; | |
1896 | ||
1897 | live = HC_IS_RUNNING (ehci_to_hcd(ehci)->state); | |
1898 | switch (type) { | |
1899 | case Q_TYPE_QH: | |
1900 | /* handle any completions */ | |
1901 | temp.qh = qh_get (q.qh); | |
1902 | type = Q_NEXT_TYPE (q.qh->hw_next); | |
1903 | q = q.qh->qh_next; | |
1904 | modified = qh_completions (ehci, temp.qh, regs); | |
1905 | if (unlikely (list_empty (&temp.qh->qtd_list))) | |
1906 | intr_deschedule (ehci, temp.qh); | |
1907 | qh_put (temp.qh); | |
1908 | break; | |
1909 | case Q_TYPE_FSTN: | |
1910 | /* for "save place" FSTNs, look at QH entries | |
1911 | * in the previous frame for completions. | |
1912 | */ | |
1913 | if (q.fstn->hw_prev != EHCI_LIST_END) { | |
1914 | dbg ("ignoring completions from FSTNs"); | |
1915 | } | |
1916 | type = Q_NEXT_TYPE (q.fstn->hw_next); | |
1917 | q = q.fstn->fstn_next; | |
1918 | break; | |
1919 | case Q_TYPE_ITD: | |
1920 | /* skip itds for later in the frame */ | |
1921 | rmb (); | |
1922 | for (uf = live ? uframes : 8; uf < 8; uf++) { | |
1923 | if (0 == (q.itd->hw_transaction [uf] | |
1924 | & ITD_ACTIVE)) | |
1925 | continue; | |
1926 | q_p = &q.itd->itd_next; | |
1927 | hw_p = &q.itd->hw_next; | |
1928 | type = Q_NEXT_TYPE (q.itd->hw_next); | |
1929 | q = *q_p; | |
1930 | break; | |
1931 | } | |
1932 | if (uf != 8) | |
1933 | break; | |
1934 | ||
1935 | /* this one's ready ... HC won't cache the | |
1936 | * pointer for much longer, if at all. | |
1937 | */ | |
1938 | *q_p = q.itd->itd_next; | |
1939 | *hw_p = q.itd->hw_next; | |
1940 | type = Q_NEXT_TYPE (q.itd->hw_next); | |
1941 | wmb(); | |
1942 | modified = itd_complete (ehci, q.itd, regs); | |
1943 | q = *q_p; | |
1944 | break; | |
1945 | case Q_TYPE_SITD: | |
1946 | if ((q.sitd->hw_results & SITD_ACTIVE) | |
1947 | && live) { | |
1948 | q_p = &q.sitd->sitd_next; | |
1949 | hw_p = &q.sitd->hw_next; | |
1950 | type = Q_NEXT_TYPE (q.sitd->hw_next); | |
1951 | q = *q_p; | |
1952 | break; | |
1953 | } | |
1954 | *q_p = q.sitd->sitd_next; | |
1955 | *hw_p = q.sitd->hw_next; | |
1956 | type = Q_NEXT_TYPE (q.sitd->hw_next); | |
1957 | wmb(); | |
1958 | modified = sitd_complete (ehci, q.sitd, regs); | |
1959 | q = *q_p; | |
1960 | break; | |
1961 | default: | |
1962 | dbg ("corrupt type %d frame %d shadow %p", | |
1963 | type, frame, q.ptr); | |
1964 | // BUG (); | |
1965 | q.ptr = NULL; | |
1966 | } | |
1967 | ||
1968 | /* assume completion callbacks modify the queue */ | |
1969 | if (unlikely (modified)) | |
1970 | goto restart; | |
1971 | } | |
1972 | ||
1973 | /* stop when we catch up to the HC */ | |
1974 | ||
1975 | // FIXME: this assumes we won't get lapped when | |
1976 | // latencies climb; that should be rare, but... | |
1977 | // detect it, and just go all the way around. | |
1978 | // FLR might help detect this case, so long as latencies | |
1979 | // don't exceed periodic_size msec (default 1.024 sec). | |
1980 | ||
1981 | // FIXME: likewise assumes HC doesn't halt mid-scan | |
1982 | ||
1983 | if (now_uframe == clock) { | |
1984 | unsigned now; | |
1985 | ||
1986 | if (!HC_IS_RUNNING (ehci_to_hcd(ehci)->state)) | |
1987 | break; | |
1988 | ehci->next_uframe = now_uframe; | |
1989 | now = readl (&ehci->regs->frame_index) % mod; | |
1990 | if (now_uframe == now) | |
1991 | break; | |
1992 | ||
1993 | /* rescan the rest of this frame, then ... */ | |
1994 | clock = now; | |
1995 | } else { | |
1996 | now_uframe++; | |
1997 | now_uframe %= mod; | |
1998 | } | |
1999 | } | |
2000 | } |
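/*
 * How the scan terminates, with invented numbers: suppose the loop
 * reaches now_uframe == clock == 0x1200 while the HC is running.  The
 * scan position is saved in ehci->next_uframe and FRINDEX is re-read;
 * if it still reads 0x1200 the scan stops, but if it now reports, say,
 * 0x1207, clock becomes 0x1207 and the rest of frame 0x240 is rescanned
 * before stopping.  That re-read is what keeps completions from being
 * stranded when the HC advances during the scan, subject to the lapping
 * caveat in the FIXME above.
 */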