/*
 * From commit 7f84eef0 ("USB: Support for bandwidth allocation."),
 * linux-block.git: drivers/usb/host/xhci-ring.c
 */
/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

/*
 * Ring initialization rules:
 * 1. Each segment is initialized to zero, except for link TRBs.
 * 2. Ring cycle state = 0. This represents Producer Cycle State (PCS) or
 *    Consumer Cycle State (CCS), depending on ring function.
 * 3. Enqueue pointer = dequeue pointer = address of first TRB in the segment.
 *
 * Ring behavior rules:
 * 1. A ring is empty if enqueue == dequeue. This means there will always be at
 *    least one free TRB in the ring. This is useful if you want to turn that
 *    into a link TRB and expand the ring.
 * 2. When incrementing an enqueue or dequeue pointer, if the next TRB is a
 *    link TRB, then load the pointer with the address in the link TRB. If the
 *    link TRB had its toggle bit set, you may need to update the ring cycle
 *    state (see cycle bit rules). You may have to do this multiple times
 *    until you reach a non-link TRB.
 * 3. A ring is full if enqueue++ (for the definition of increment above)
 *    equals the dequeue pointer.
 *
 * Cycle bit rules:
 * 1. When a consumer increments a dequeue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 * 2. When a producer increments an enqueue pointer and encounters a toggle bit
 *    in a link TRB, it must toggle the ring cycle state.
 *
 * Producer rules:
 * 1. Check if ring is full before you enqueue.
 * 2. Write the ring cycle state to the cycle bit in the TRB you're enqueuing.
 *    Update enqueue pointer between each write (which may update the ring
 *    cycle state).
 * 3. Notify consumer. If SW is producer, it rings the doorbell for command
 *    and endpoint rings. If HC is the producer for the event ring,
 *    and it generates an interrupt according to interrupt modulation rules.
 *
 * Consumer rules:
 * 1. Check if TRB belongs to you. If the cycle bit == your ring cycle state,
 *    the TRB is owned by the consumer.
 * 2. Update dequeue pointer (which may update the ring cycle state) and
 *    continue processing TRBs until you reach a TRB which is not owned by you.
 * 3. Notify the producer. SW is the consumer for the event ring, and it
 *    updates event ring dequeue pointer. HC is the consumer for the command and
 *    endpoint rings; it generates events on the event ring for these.
 */

67#include "xhci.h"
68
69/*
70 * Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
71 * address of the TRB.
72 */
73dma_addr_t trb_virt_to_dma(struct xhci_segment *seg,
74 union xhci_trb *trb)
75{
76 unsigned int offset;
77
78 if (!seg || !trb || (void *) trb < (void *) seg->trbs)
79 return 0;
80 /* offset in bytes, since these are byte-addressable */
81 offset = (unsigned int) trb - (unsigned int) seg->trbs;
82 /* SEGMENT_SIZE in bytes, trbs are 16-byte aligned */
83 if (offset > SEGMENT_SIZE || (offset % sizeof(*trb)) != 0)
84 return 0;
85 return seg->dma + offset;
86}
87
88/* Does this link TRB point to the first segment in a ring,
89 * or was the previous TRB the last TRB on the last segment in the ERST?
90 */
91static inline bool last_trb_on_last_seg(struct xhci_hcd *xhci, struct xhci_ring *ring,
92 struct xhci_segment *seg, union xhci_trb *trb)
93{
94 if (ring == xhci->event_ring)
95 return (trb == &seg->trbs[TRBS_PER_SEGMENT]) &&
96 (seg->next == xhci->event_ring->first_seg);
97 else
98 return trb->link.control & LINK_TOGGLE;
99}
100
101/* Is this TRB a link TRB or was the last TRB the last TRB in this event ring
102 * segment? I.e. would the updated event TRB pointer step off the end of the
103 * event seg?
104 */
105static inline int last_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
106 struct xhci_segment *seg, union xhci_trb *trb)
107{
108 if (ring == xhci->event_ring)
109 return trb == &seg->trbs[TRBS_PER_SEGMENT];
110 else
111 return (trb->link.control & TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK);
112}
113
114/*
115 * See Cycle bit rules. SW is the consumer for the event ring only.
116 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
117 */
118static void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
119{
120 union xhci_trb *next = ++(ring->dequeue);
121
122 ring->deq_updates++;
123 /* Update the dequeue pointer further if that was a link TRB or we're at
124 * the end of an event ring segment (which doesn't have link TRBS)
125 */
126 while (last_trb(xhci, ring, ring->deq_seg, next)) {
127 if (consumer && last_trb_on_last_seg(xhci, ring, ring->deq_seg, next)) {
128 ring->cycle_state = (ring->cycle_state ? 0 : 1);
129 if (!in_interrupt())
130 xhci_dbg(xhci, "Toggle cycle state for ring 0x%x = %i\n",
131 (unsigned int) ring,
132 (unsigned int) ring->cycle_state);
133 }
134 ring->deq_seg = ring->deq_seg->next;
135 ring->dequeue = ring->deq_seg->trbs;
136 next = ring->dequeue;
137 }
138}
139
140/*
141 * See Cycle bit rules. SW is the consumer for the event ring only.
142 * Don't make a ring full of link TRBs. That would be dumb and this would loop.
143 *
144 * If we've just enqueued a TRB that is in the middle of a TD (meaning the
145 * chain bit is set), then set the chain bit in all the following link TRBs.
146 * If we've enqueued the last TRB in a TD, make sure the following link TRBs
147 * have their chain bit cleared (so that each Link TRB is a separate TD).
148 *
149 * Section 6.4.4.1 of the 0.95 spec says link TRBs cannot have the chain bit
150 * set, but other sections talk about dealing with the chain bit set.
151 * Assume section 6.4.4.1 is wrong, and the chain bit can be set in a Link TRB.
152 */
153static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring, bool consumer)
154{
155 u32 chain;
156 union xhci_trb *next;
157
158 chain = ring->enqueue->generic.field[3] & TRB_CHAIN;
159 next = ++(ring->enqueue);
160
161 ring->enq_updates++;
162 /* Update the dequeue pointer further if that was a link TRB or we're at
163 * the end of an event ring segment (which doesn't have link TRBS)
164 */
165 while (last_trb(xhci, ring, ring->enq_seg, next)) {
166 if (!consumer) {
167 if (ring != xhci->event_ring) {
168 /* Give this link TRB to the hardware */
169 if (next->link.control & TRB_CYCLE)
170 next->link.control &= (u32) ~TRB_CYCLE;
171 else
172 next->link.control |= (u32) TRB_CYCLE;
173 next->link.control &= TRB_CHAIN;
174 next->link.control |= chain;
175 }
176 /* Toggle the cycle bit after the last ring segment. */
177 if (last_trb_on_last_seg(xhci, ring, ring->enq_seg, next)) {
178 ring->cycle_state = (ring->cycle_state ? 0 : 1);
179 if (!in_interrupt())
180 xhci_dbg(xhci, "Toggle cycle state for ring 0x%x = %i\n",
181 (unsigned int) ring,
182 (unsigned int) ring->cycle_state);
183 }
184 }
185 ring->enq_seg = ring->enq_seg->next;
186 ring->enqueue = ring->enq_seg->trbs;
187 next = ring->enqueue;
188 }
189}
190
191/*
192 * Check to see if there's room to enqueue num_trbs on the ring. See rules
193 * above.
194 * FIXME: this would be simpler and faster if we just kept track of the number
195 * of free TRBs in a ring.
196 */
197static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring,
198 unsigned int num_trbs)
199{
200 int i;
201 union xhci_trb *enq = ring->enqueue;
202 struct xhci_segment *enq_seg = ring->enq_seg;
203
204 /* Check if ring is empty */
205 if (enq == ring->dequeue)
206 return 1;
207 /* Make sure there's an extra empty TRB available */
208 for (i = 0; i <= num_trbs; ++i) {
209 if (enq == ring->dequeue)
210 return 0;
211 enq++;
212 while (last_trb(xhci, ring, enq_seg, enq)) {
213 enq_seg = enq_seg->next;
214 enq = enq_seg->trbs;
215 }
216 }
217 return 1;
218}
219
220void set_hc_event_deq(struct xhci_hcd *xhci)
221{
222 u32 temp;
223 dma_addr_t deq;
224
225 deq = trb_virt_to_dma(xhci->event_ring->deq_seg,
226 xhci->event_ring->dequeue);
227 if (deq == 0 && !in_interrupt())
228 xhci_warn(xhci, "WARN something wrong with SW event ring "
229 "dequeue ptr.\n");
230 /* Update HC event ring dequeue pointer */
231 temp = xhci_readl(xhci, &xhci->ir_set->erst_dequeue[0]);
232 temp &= ERST_PTR_MASK;
233 if (!in_interrupt())
234 xhci_dbg(xhci, "// Write event ring dequeue pointer\n");
235 xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]);
236 xhci_writel(xhci, (deq & ~ERST_PTR_MASK) | temp,
237 &xhci->ir_set->erst_dequeue[0]);
238}
239
240/* Ring the host controller doorbell after placing a command on the ring */
241void ring_cmd_db(struct xhci_hcd *xhci)
242{
243 u32 temp;
244
245 xhci_dbg(xhci, "// Ding dong!\n");
246 temp = xhci_readl(xhci, &xhci->dba->doorbell[0]) & DB_MASK;
247 xhci_writel(xhci, temp | DB_TARGET_HOST, &xhci->dba->doorbell[0]);
248 /* Flush PCI posted writes */
249 xhci_readl(xhci, &xhci->dba->doorbell[0]);
250}
251
/*
 * Handle a Command Completion event: sanity-check that the event's TRB DMA
 * address matches our command ring dequeue pointer, then dispatch on the
 * type of the completed command TRB.  Advances the command ring dequeue
 * pointer on success; records a bit in error_bitmask and bails on mismatch.
 */
static void handle_cmd_completion(struct xhci_hcd *xhci,
		struct xhci_event_cmd *event)
{
	int slot_id = TRB_TO_SLOT_ID(event->flags);
	u64 cmd_dma;
	dma_addr_t cmd_dequeue_dma;

	/* Reassemble the 64-bit TRB pointer from the two 32-bit halves */
	cmd_dma = (((u64) event->cmd_trb[1]) << 32) + event->cmd_trb[0];
	cmd_dequeue_dma = trb_virt_to_dma(xhci->cmd_ring->deq_seg,
			xhci->cmd_ring->dequeue);
	/* Is the command ring deq ptr out of sync with the deq seg ptr? */
	if (cmd_dequeue_dma == 0) {
		xhci->error_bitmask |= 1 << 4;
		return;
	}
	/* Does the DMA address match our internal dequeue pointer address? */
	if (cmd_dma != (u64) cmd_dequeue_dma) {
		xhci->error_bitmask |= 1 << 5;
		return;
	}
	/* Dispatch on the type of the command TRB we just completed */
	switch (xhci->cmd_ring->dequeue->generic.field[3] & TRB_TYPE_BITMASK) {
	case TRB_TYPE(TRB_ENABLE_SLOT):
		/* Hand the new slot ID (or 0 on failure) to the waiter */
		if (GET_COMP_CODE(event->status) == COMP_SUCCESS)
			xhci->slot_id = slot_id;
		else
			xhci->slot_id = 0;
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_DISABLE_SLOT):
		if (xhci->devs[slot_id])
			xhci_free_virt_device(xhci, slot_id);
		break;
	case TRB_TYPE(TRB_ADDR_DEV):
		/* NOTE(review): devs[slot_id] is not NULL-checked here,
		 * unlike the DISABLE_SLOT case above — confirm the slot is
		 * guaranteed allocated when an Address Device completes.
		 */
		xhci->devs[slot_id]->cmd_status = GET_COMP_CODE(event->status);
		complete(&xhci->addr_dev);
		break;
	case TRB_TYPE(TRB_CMD_NOOP):
		/* Counted so the no-op self-test can verify completions */
		++xhci->noops_handled;
		break;
	default:
		/* Skip over unknown commands on the event ring */
		xhci->error_bitmask |= 1 << 6;
		break;
	}
	inc_deq(xhci, xhci->cmd_ring, false);
}
298
0f2a7930
SS
299static void handle_port_status(struct xhci_hcd *xhci,
300 union xhci_trb *event)
301{
302 u32 port_id;
303
304 /* Port status change events always have a successful completion code */
305 if (GET_COMP_CODE(event->generic.field[2]) != COMP_SUCCESS) {
306 xhci_warn(xhci, "WARN: xHC returned failed port status event\n");
307 xhci->error_bitmask |= 1 << 8;
308 }
309 /* FIXME: core doesn't care about all port link state changes yet */
310 port_id = GET_PORT_ID(event->generic.field[0]);
311 xhci_dbg(xhci, "Port Status Change Event for port %d\n", port_id);
312
313 /* Update event ring dequeue pointer before dropping the lock */
314 inc_deq(xhci, xhci->event_ring, true);
315 set_hc_event_deq(xhci);
316
317 spin_unlock(&xhci->lock);
318 /* Pass this up to the core */
319 usb_hcd_poll_rh_status(xhci_to_hcd(xhci));
320 spin_lock(&xhci->lock);
321}
322
d0e96f5a
SS
323/*
324 * This TD is defined by the TRBs starting at start_trb in start_seg and ending
325 * at end_trb, which may be in another segment. If the suspect DMA address is a
326 * TRB in this TD, this function returns that TRB's segment. Otherwise it
327 * returns 0.
328 */
329static struct xhci_segment *trb_in_td(
330 struct xhci_segment *start_seg,
331 union xhci_trb *start_trb,
332 union xhci_trb *end_trb,
333 dma_addr_t suspect_dma)
334{
335 dma_addr_t start_dma;
336 dma_addr_t end_seg_dma;
337 dma_addr_t end_trb_dma;
338 struct xhci_segment *cur_seg;
339
340 start_dma = trb_virt_to_dma(start_seg, start_trb);
341 cur_seg = start_seg;
342
343 do {
344 /*
345 * Last TRB is a link TRB (unless we start inserting links in
346 * the middle, FIXME if you do)
347 */
348 end_seg_dma = trb_virt_to_dma(cur_seg, &start_seg->trbs[TRBS_PER_SEGMENT - 2]);
349 /* If the end TRB isn't in this segment, this is set to 0 */
350 end_trb_dma = trb_virt_to_dma(cur_seg, end_trb);
351
352 if (end_trb_dma > 0) {
353 /* The end TRB is in this segment, so suspect should be here */
354 if (start_dma <= end_trb_dma) {
355 if (suspect_dma >= start_dma && suspect_dma <= end_trb_dma)
356 return cur_seg;
357 } else {
358 /* Case for one segment with
359 * a TD wrapped around to the top
360 */
361 if ((suspect_dma >= start_dma &&
362 suspect_dma <= end_seg_dma) ||
363 (suspect_dma >= cur_seg->dma &&
364 suspect_dma <= end_trb_dma))
365 return cur_seg;
366 }
367 return 0;
368 } else {
369 /* Might still be somewhere in this segment */
370 if (suspect_dma >= start_dma && suspect_dma <= end_seg_dma)
371 return cur_seg;
372 }
373 cur_seg = cur_seg->next;
374 start_dma = trb_virt_to_dma(cur_seg, &cur_seg->trbs[0]);
375 } while (1);
376
377}
378
379/*
380 * If this function returns an error condition, it means it got a Transfer
381 * event with a corrupted Slot ID, Endpoint ID, or TRB DMA address.
382 * At this point, the host controller is probably hosed and should be reset.
383 */
384static int handle_tx_event(struct xhci_hcd *xhci,
385 struct xhci_transfer_event *event)
386{
387 struct xhci_virt_device *xdev;
388 struct xhci_ring *ep_ring;
389 int ep_index;
390 struct xhci_td *td = 0;
391 dma_addr_t event_dma;
392 struct xhci_segment *event_seg;
393 union xhci_trb *event_trb;
394 struct urb *urb = NULL;
395 int status = -EINPROGRESS;
396
397 xdev = xhci->devs[TRB_TO_SLOT_ID(event->flags)];
398 if (!xdev) {
399 xhci_err(xhci, "ERROR Transfer event pointed to bad slot\n");
400 return -ENODEV;
401 }
402
403 /* Endpoint ID is 1 based, our index is zero based */
404 ep_index = TRB_TO_EP_ID(event->flags) - 1;
405 ep_ring = xdev->ep_rings[ep_index];
406 if (!ep_ring || (xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK) == EP_STATE_DISABLED) {
407 xhci_err(xhci, "ERROR Transfer event pointed to disabled endpoint\n");
408 return -ENODEV;
409 }
410
411 event_dma = event->buffer[0];
412 if (event->buffer[1] != 0)
413 xhci_warn(xhci, "WARN ignoring upper 32-bits of 64-bit TRB dma address\n");
414
415 /* This TRB should be in the TD at the head of this ring's TD list */
416 if (list_empty(&ep_ring->td_list)) {
417 xhci_warn(xhci, "WARN Event TRB for slot %d ep %d with no TDs queued?\n",
418 TRB_TO_SLOT_ID(event->flags), ep_index);
419 xhci_dbg(xhci, "Event TRB with TRB type ID %u\n",
420 (unsigned int) (event->flags & TRB_TYPE_BITMASK)>>10);
421 xhci_print_trb_offsets(xhci, (union xhci_trb *) event);
422 urb = NULL;
423 goto cleanup;
424 }
425 td = list_entry(ep_ring->td_list.next, struct xhci_td, td_list);
426
427 /* Is this a TRB in the currently executing TD? */
428 event_seg = trb_in_td(ep_ring->deq_seg, ep_ring->dequeue,
429 td->last_trb, event_dma);
430 if (!event_seg) {
431 /* HC is busted, give up! */
432 xhci_err(xhci, "ERROR Transfer event TRB DMA ptr not part of current TD\n");
433 return -ESHUTDOWN;
434 }
435 event_trb = &event_seg->trbs[(event_dma - event_seg->dma) / sizeof(*event_trb)];
436
437 /* Now update the urb's actual_length and give back to the core */
438 /* Was this a control transfer? */
439 if (usb_endpoint_xfer_control(&td->urb->ep->desc)) {
440 xhci_debug_trb(xhci, xhci->event_ring->dequeue);
441 switch (GET_COMP_CODE(event->transfer_len)) {
442 case COMP_SUCCESS:
443 if (event_trb == ep_ring->dequeue) {
444 xhci_warn(xhci, "WARN: Success on ctrl setup TRB without IOC set??\n");
445 status = -ESHUTDOWN;
446 } else if (event_trb != td->last_trb) {
447 xhci_warn(xhci, "WARN: Success on ctrl data TRB without IOC set??\n");
448 status = -ESHUTDOWN;
449 } else {
450 xhci_dbg(xhci, "Successful control transfer!\n");
451 status = 0;
452 }
453 break;
454 case COMP_SHORT_TX:
455 xhci_warn(xhci, "WARN: short transfer on control ep\n");
456 status = -EREMOTEIO;
457 break;
458 case COMP_STALL:
459 xhci_warn(xhci, "WARN: Stalled control ep\n");
460 status = -EPIPE;
461 break;
462 case COMP_TRB_ERR:
463 xhci_warn(xhci, "WARN: TRB error on control ep\n");
464 status = -EILSEQ;
465 break;
466 case COMP_TX_ERR:
467 xhci_warn(xhci, "WARN: transfer error on control ep\n");
468 status = -EPROTO;
469 break;
470 case COMP_DB_ERR:
471 xhci_warn(xhci, "WARN: HC couldn't access mem fast enough on control TX\n");
472 status = -ENOSR;
473 break;
474 default:
475 xhci_dbg(xhci, "ERROR Unknown event condition, HC probably busted\n");
476 goto cleanup;
477 }
478 /*
479 * Did we transfer any data, despite the errors that might have
480 * happened? I.e. did we get past the setup stage?
481 */
482 if (event_trb != ep_ring->dequeue) {
483 /* The event was for the status stage */
484 if (event_trb == td->last_trb) {
485 td->urb->actual_length = td->urb->transfer_buffer_length;
486 } else {
487 /* The event was for the data stage */
488 td->urb->actual_length = td->urb->transfer_buffer_length -
489 TRB_LEN(event->transfer_len);
490 }
491 }
492 while (ep_ring->dequeue != td->last_trb)
493 inc_deq(xhci, ep_ring, false);
494 inc_deq(xhci, ep_ring, false);
495
496 /* Clean up the endpoint's TD list */
497 urb = td->urb;
498 list_del(&td->td_list);
499 kfree(td);
500 } else {
501 xhci_dbg(xhci, "FIXME do something for non-control transfers\n");
502 }
503cleanup:
504 inc_deq(xhci, xhci->event_ring, true);
505 set_hc_event_deq(xhci);
506
507 if (urb) {
508 usb_hcd_unlink_urb_from_ep(xhci_to_hcd(xhci), urb);
509 spin_unlock(&xhci->lock);
510 usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, status);
511 spin_lock(&xhci->lock);
512 }
513 return 0;
514}
515
0f2a7930
SS
516/*
517 * This function handles all OS-owned events on the event ring. It may drop
518 * xhci->lock between event processing (e.g. to pass up port status changes).
519 */
7f84eef0
SS
520void handle_event(struct xhci_hcd *xhci)
521{
522 union xhci_trb *event;
0f2a7930 523 int update_ptrs = 1;
d0e96f5a 524 int ret;
7f84eef0
SS
525
526 if (!xhci->event_ring || !xhci->event_ring->dequeue) {
527 xhci->error_bitmask |= 1 << 1;
528 return;
529 }
530
531 event = xhci->event_ring->dequeue;
532 /* Does the HC or OS own the TRB? */
533 if ((event->event_cmd.flags & TRB_CYCLE) !=
534 xhci->event_ring->cycle_state) {
535 xhci->error_bitmask |= 1 << 2;
536 return;
537 }
538
0f2a7930 539 /* FIXME: Handle more event types. */
7f84eef0
SS
540 switch ((event->event_cmd.flags & TRB_TYPE_BITMASK)) {
541 case TRB_TYPE(TRB_COMPLETION):
542 handle_cmd_completion(xhci, &event->event_cmd);
543 break;
0f2a7930
SS
544 case TRB_TYPE(TRB_PORT_STATUS):
545 handle_port_status(xhci, event);
546 update_ptrs = 0;
547 break;
d0e96f5a
SS
548 case TRB_TYPE(TRB_TRANSFER):
549 ret = handle_tx_event(xhci, &event->trans_event);
550 if (ret < 0)
551 xhci->error_bitmask |= 1 << 9;
552 else
553 update_ptrs = 0;
554 break;
7f84eef0
SS
555 default:
556 xhci->error_bitmask |= 1 << 3;
557 }
558
0f2a7930
SS
559 if (update_ptrs) {
560 /* Update SW and HC event ring dequeue pointer */
561 inc_deq(xhci, xhci->event_ring, true);
562 set_hc_event_deq(xhci);
563 }
7f84eef0
SS
564 /* Are there more items on the event ring? */
565 handle_event(xhci);
566}
567
d0e96f5a
SS
568/**** Endpoint Ring Operations ****/
569
7f84eef0
SS
570/*
571 * Generic function for queueing a TRB on a ring.
572 * The caller must have checked to make sure there's room on the ring.
573 */
574static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
575 bool consumer,
576 u32 field1, u32 field2, u32 field3, u32 field4)
577{
578 struct xhci_generic_trb *trb;
579
580 trb = &ring->enqueue->generic;
581 trb->field[0] = field1;
582 trb->field[1] = field2;
583 trb->field[2] = field3;
584 trb->field[3] = field4;
585 inc_enq(xhci, ring, consumer);
586}
587
/*
 * Does various checks on the endpoint ring, and makes it ready to queue num_trbs.
 * FIXME allocate segments if the ring is full.
 *
 * Returns 0 when the endpoint is in a queueable state and the ring has room;
 * otherwise a negative errno.  mem_flags is currently unused — presumably
 * reserved for the segment-allocation FIXME above.
 */
static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
		u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
	/* Make sure the endpoint has been added to xHC schedule */
	xhci_dbg(xhci, "Endpoint state = 0x%x\n", ep_state);
	switch (ep_state) {
	case EP_STATE_DISABLED:
		/*
		 * USB core changed config/interfaces without notifying us,
		 * or hardware is reporting the wrong state.
		 */
		xhci_warn(xhci, "WARN urb submitted to disabled ep\n");
		return -ENOENT;
	case EP_STATE_HALTED:
	case EP_STATE_ERROR:
		xhci_warn(xhci, "WARN waiting for halt or error on ep "
				"to be cleared\n");
		/* FIXME event handling code for error needs to clear it */
		/* XXX not sure if this should be -ENOENT or not */
		return -EINVAL;
	case EP_STATE_STOPPED:
	case EP_STATE_RUNNING:
		/* The only two states TRBs may be queued from */
		break;
	default:
		xhci_err(xhci, "ERROR unknown endpoint state for ep\n");
		/*
		 * FIXME issue Configure Endpoint command to try to get the HC
		 * back into a known state.
		 */
		return -EINVAL;
	}
	if (!room_on_ring(xhci, ep_ring, num_trbs)) {
		/* FIXME allocate more room */
		xhci_err(xhci, "ERROR no room on ep ring\n");
		return -ENOMEM;
	}
	return 0;
}
630
631int xhci_prepare_transfer(struct xhci_hcd *xhci,
632 struct xhci_virt_device *xdev,
633 unsigned int ep_index,
634 unsigned int num_trbs,
635 struct urb *urb,
636 struct xhci_td **td,
637 gfp_t mem_flags)
638{
639 int ret;
640
641 ret = prepare_ring(xhci, xdev->ep_rings[ep_index],
642 xdev->out_ctx->ep[ep_index].ep_info & EP_STATE_MASK,
643 num_trbs, mem_flags);
644 if (ret)
645 return ret;
646 *td = kzalloc(sizeof(struct xhci_td), mem_flags);
647 if (!*td)
648 return -ENOMEM;
649 INIT_LIST_HEAD(&(*td)->td_list);
650
651 ret = usb_hcd_link_urb_to_ep(xhci_to_hcd(xhci), urb);
652 if (unlikely(ret)) {
653 kfree(*td);
654 return ret;
655 }
656
657 (*td)->urb = urb;
658 urb->hcpriv = (void *) (*td);
659 /* Add this TD to the tail of the endpoint ring's TD list */
660 list_add_tail(&(*td)->td_list, &xdev->ep_rings[ep_index]->td_list);
661
662 return 0;
663}
664
/* Caller must have locked xhci->lock */
/*
 * Queue a control transfer: a setup TRB, an optional data stage TRB, and a
 * status TRB, then ring the endpoint doorbell.  Returns 0 on success or a
 * negative errno if the ring can't accept the TD.
 */
int queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
		struct urb *urb, int slot_id, unsigned int ep_index)
{
	struct xhci_ring *ep_ring;
	int num_trbs;
	int ret;
	struct usb_ctrlrequest *setup;
	struct xhci_generic_trb *start_trb;
	int start_cycle;
	u32 field;
	struct xhci_td *td;

	ep_ring = xhci->devs[slot_id]->ep_rings[ep_index];

	/*
	 * Need to copy setup packet into setup TRB, so we can't use the setup
	 * DMA address.
	 */
	if (!urb->setup_packet)
		return -EINVAL;

	if (!in_interrupt())
		xhci_dbg(xhci, "Queueing ctrl tx for slot id %d, ep %d\n",
				slot_id, ep_index);
	/* 1 TRB for setup, 1 for status */
	num_trbs = 2;
	/*
	 * Don't need to check if we need additional event data and normal TRBs,
	 * since data in control transfers will never get bigger than 16MB
	 * XXX: can we get a buffer that crosses 64KB boundaries?
	 */
	if (urb->transfer_buffer_length > 0)
		num_trbs++;
	ret = xhci_prepare_transfer(xhci, xhci->devs[slot_id], ep_index, num_trbs,
			urb, &td, mem_flags);
	if (ret < 0)
		return ret;

	/*
	 * Don't give the first TRB to the hardware (by toggling the cycle bit)
	 * until we've finished creating all the other TRBs. The ring's cycle
	 * state may change as we enqueue the other TRBs, so save it too.
	 */
	start_trb = &ep_ring->enqueue->generic;
	start_cycle = ep_ring->cycle_state;

	/* Queue setup TRB - see section 6.4.1.2.1 */
	/* FIXME better way to translate setup_packet into two u32 fields? */
	setup = (struct usb_ctrlrequest *) urb->setup_packet;
	queue_trb(xhci, ep_ring, false,
			/* FIXME endianness is probably going to bite my ass here. */
			setup->bRequestType | setup->bRequest << 8 | setup->wValue << 16,
			setup->wIndex | setup->wLength << 16,
			TRB_LEN(8) | TRB_INTR_TARGET(0),
			/* Immediate data in pointer */
			TRB_IDT | TRB_TYPE(TRB_SETUP));

	/* If there's data, queue data TRBs */
	field = 0;
	if (urb->transfer_buffer_length > 0) {
		if (setup->bRequestType & USB_DIR_IN)
			field |= TRB_DIR_IN;
		queue_trb(xhci, ep_ring, false,
				lower_32_bits(urb->transfer_dma),
				upper_32_bits(urb->transfer_dma),
				TRB_LEN(urb->transfer_buffer_length) | TRB_INTR_TARGET(0),
				/* Event on short tx */
				field | TRB_ISP | TRB_TYPE(TRB_DATA) | ep_ring->cycle_state);
	}

	/* Save the DMA address of the last TRB in the TD */
	td->last_trb = ep_ring->enqueue;

	/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
	/* If the device sent data, the status stage is an OUT transfer */
	if (urb->transfer_buffer_length > 0 && setup->bRequestType & USB_DIR_IN)
		field = 0;
	else
		field = TRB_DIR_IN;
	queue_trb(xhci, ep_ring, false,
			0,
			0,
			TRB_INTR_TARGET(0),
			/* Event on completion */
			field | TRB_IOC | TRB_TYPE(TRB_STATUS) | ep_ring->cycle_state);

	/*
	 * Pass all the TRBs to the hardware at once and make sure this write
	 * isn't reordered.
	 */
	wmb();
	/* Hand the first TRB to the hardware by writing the saved cycle bit */
	start_trb->field[3] |= start_cycle;
	field = xhci_readl(xhci, &xhci->dba->doorbell[slot_id]) & DB_MASK;
	xhci_writel(xhci, field | EPI_TO_DB(ep_index), &xhci->dba->doorbell[slot_id]);
	/* Flush PCI posted writes */
	xhci_readl(xhci, &xhci->dba->doorbell[slot_id]);

	return 0;
}
765
766/**** Command Ring Operations ****/
767
7f84eef0
SS
768/* Generic function for queueing a command TRB on the command ring */
769static int queue_command(struct xhci_hcd *xhci, u32 field1, u32 field2, u32 field3, u32 field4)
770{
771 if (!room_on_ring(xhci, xhci->cmd_ring, 1)) {
772 if (!in_interrupt())
773 xhci_err(xhci, "ERR: No room for command on command ring\n");
774 return -ENOMEM;
775 }
776 queue_trb(xhci, xhci->cmd_ring, false, field1, field2, field3,
777 field4 | xhci->cmd_ring->cycle_state);
778 return 0;
779}
780
/* Queue a no-op command on the command ring */
static int queue_cmd_noop(struct xhci_hcd *xhci)
{
	/* A no-op TRB carries no payload; only the type field matters. */
	return queue_command(xhci, 0, 0, 0, TRB_TYPE(TRB_CMD_NOOP));
}
786
/*
 * Place a no-op command on the command ring to test the command and
 * event ring.
 *
 * Returns NULL if the command could not be queued.  On success it returns
 * ring_cmd_db itself (a function pointer cast to void *) — presumably the
 * caller invokes it to ring the doorbell once it is safe to do so; verify
 * against the callers.
 */
void *setup_one_noop(struct xhci_hcd *xhci)
{
	if (queue_cmd_noop(xhci) < 0)
		return NULL;
	/* Matched against noops_handled by the command completion path */
	xhci->noops_submitted++;
	return ring_cmd_db;
}
/* Queue a slot enable or disable request on the command ring.
 * trb_type is expected to be TRB_ENABLE_SLOT or TRB_DISABLE_SLOT.
 */
int queue_slot_control(struct xhci_hcd *xhci, u32 trb_type, u32 slot_id)
{
	return queue_command(xhci, 0, 0, 0,
			TRB_TYPE(trb_type) | SLOT_ID_FOR_TRB(slot_id));
}
805
/* Queue an address device command TRB.
 * in_ctx_ptr is the DMA address of the slot's input context; NOTE(review):
 * only the low 32 bits are placed in the TRB here — confirm against the
 * 64-bit TRB pointer layout.
 */
int queue_address_device(struct xhci_hcd *xhci, dma_addr_t in_ctx_ptr, u32 slot_id)
{
	return queue_command(xhci, in_ctx_ptr, 0, 0,
			TRB_TYPE(TRB_ADDR_DEV) | SLOT_ID_FOR_TRB(slot_id));
}