Commit | Line | Data |
---|---|---|
66d4eadd SS |
1 | /* |
2 | * xHCI host controller driver | |
3 | * | |
4 | * Copyright (C) 2008 Intel Corp. | |
5 | * | |
6 | * Author: Sarah Sharp | |
7 | * Some code borrowed from the Linux EHCI driver. | |
8 | * | |
9 | * This program is free software; you can redistribute it and/or modify | |
10 | * it under the terms of the GNU General Public License version 2 as | |
11 | * published by the Free Software Foundation. | |
12 | * | |
13 | * This program is distributed in the hope that it will be useful, but | |
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | |
15 | * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
16 | * for more details. | |
17 | * | |
18 | * You should have received a copy of the GNU General Public License | |
19 | * along with this program; if not, write to the Free Software Foundation, | |
20 | * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | |
21 | */ | |
22 | ||
23 | #include <linux/usb.h> | |
0ebbab37 | 24 | #include <linux/pci.h> |
527c6d7f | 25 | #include <linux/dmapool.h> |
66d4eadd SS |
26 | |
27 | #include "xhci.h" | |
28 | ||
0ebbab37 SS |
29 | /* |
30 | * Allocates a generic ring segment from the ring pool, sets the dma address, | |
31 | * initializes the segment to zero, and sets the private next pointer to NULL. | |
32 | * | |
33 | * Section 4.11.1.1: | |
34 | * "All components of all Command and Transfer TRBs shall be initialized to '0'" | |
35 | */ | |
36 | static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags) | |
37 | { | |
38 | struct xhci_segment *seg; | |
39 | dma_addr_t dma; | |
40 | ||
41 | seg = kzalloc(sizeof *seg, flags); | |
42 | if (!seg) | |
43 | return 0; | |
700e2052 | 44 | xhci_dbg(xhci, "Allocating priv segment structure at %p\n", seg); |
0ebbab37 SS |
45 | |
46 | seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma); | |
47 | if (!seg->trbs) { | |
48 | kfree(seg); | |
49 | return 0; | |
50 | } | |
700e2052 GKH |
51 | xhci_dbg(xhci, "// Allocating segment at %p (virtual) 0x%llx (DMA)\n", |
52 | seg->trbs, (unsigned long long)dma); | |
0ebbab37 SS |
53 | |
54 | memset(seg->trbs, 0, SEGMENT_SIZE); | |
55 | seg->dma = dma; | |
56 | seg->next = NULL; | |
57 | ||
58 | return seg; | |
59 | } | |
60 | ||
61 | static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg) | |
62 | { | |
63 | if (!seg) | |
64 | return; | |
65 | if (seg->trbs) { | |
700e2052 GKH |
66 | xhci_dbg(xhci, "Freeing DMA segment at %p (virtual) 0x%llx (DMA)\n", |
67 | seg->trbs, (unsigned long long)seg->dma); | |
0ebbab37 SS |
68 | dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma); |
69 | seg->trbs = NULL; | |
70 | } | |
700e2052 | 71 | xhci_dbg(xhci, "Freeing priv segment structure at %p\n", seg); |
0ebbab37 SS |
72 | kfree(seg); |
73 | } | |
74 | ||
75 | /* | |
76 | * Make the prev segment point to the next segment. | |
77 | * | |
78 | * Change the last TRB in the prev segment to be a Link TRB which points to the | |
79 | * DMA address of the next segment. The caller needs to set any Link TRB | |
80 | * related flags, such as End TRB, Toggle Cycle, and no snoop. | |
81 | */ | |
82 | static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev, | |
83 | struct xhci_segment *next, bool link_trbs) | |
84 | { | |
85 | u32 val; | |
86 | ||
87 | if (!prev || !next) | |
88 | return; | |
89 | prev->next = next; | |
90 | if (link_trbs) { | |
8e595a5d | 91 | prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr = next->dma; |
0ebbab37 SS |
92 | |
93 | /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ | |
94 | val = prev->trbs[TRBS_PER_SEGMENT-1].link.control; | |
95 | val &= ~TRB_TYPE_BITMASK; | |
96 | val |= TRB_TYPE(TRB_LINK); | |
b0567b3f SS |
97 | /* Always set the chain bit with 0.95 hardware */ |
98 | if (xhci_link_trb_quirk(xhci)) | |
99 | val |= TRB_CHAIN; | |
0ebbab37 SS |
100 | prev->trbs[TRBS_PER_SEGMENT-1].link.control = val; |
101 | } | |
700e2052 GKH |
102 | xhci_dbg(xhci, "Linking segment 0x%llx to segment 0x%llx (DMA)\n", |
103 | (unsigned long long)prev->dma, | |
104 | (unsigned long long)next->dma); | |
0ebbab37 SS |
105 | } |
106 | ||
107 | /* XXX: Do we need the hcd structure in all these functions? */ | |
f94e0186 | 108 | void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring) |
0ebbab37 SS |
109 | { |
110 | struct xhci_segment *seg; | |
111 | struct xhci_segment *first_seg; | |
112 | ||
113 | if (!ring || !ring->first_seg) | |
114 | return; | |
115 | first_seg = ring->first_seg; | |
116 | seg = first_seg->next; | |
700e2052 | 117 | xhci_dbg(xhci, "Freeing ring at %p\n", ring); |
0ebbab37 SS |
118 | while (seg != first_seg) { |
119 | struct xhci_segment *next = seg->next; | |
120 | xhci_segment_free(xhci, seg); | |
121 | seg = next; | |
122 | } | |
123 | xhci_segment_free(xhci, first_seg); | |
124 | ring->first_seg = NULL; | |
125 | kfree(ring); | |
126 | } | |
127 | ||
74f9fe21 SS |
128 | static void xhci_initialize_ring_info(struct xhci_ring *ring) |
129 | { | |
130 | /* The ring is empty, so the enqueue pointer == dequeue pointer */ | |
131 | ring->enqueue = ring->first_seg->trbs; | |
132 | ring->enq_seg = ring->first_seg; | |
133 | ring->dequeue = ring->enqueue; | |
134 | ring->deq_seg = ring->first_seg; | |
135 | /* The ring is initialized to 0. The producer must write 1 to the cycle | |
136 | * bit to hand over ownership of the TRB, so PCS = 1. The consumer must | |
137 | * compare CCS to the cycle bit to check ownership, so CCS = 1. | |
138 | */ | |
139 | ring->cycle_state = 1; | |
140 | /* Not necessary for new rings, but needed for re-initialized rings */ | |
141 | ring->enq_updates = 0; | |
142 | ring->deq_updates = 0; | |
143 | } | |
144 | ||
0ebbab37 SS |
145 | /** |
146 | * Create a new ring with zero or more segments. | |
147 | * | |
148 | * Link each segment together into a ring. | |
149 | * Set the end flag and the cycle toggle bit on the last segment. | |
150 | * See section 4.9.1 and figures 15 and 16. | |
151 | */ | |
152 | static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, | |
153 | unsigned int num_segs, bool link_trbs, gfp_t flags) | |
154 | { | |
155 | struct xhci_ring *ring; | |
156 | struct xhci_segment *prev; | |
157 | ||
158 | ring = kzalloc(sizeof *(ring), flags); | |
700e2052 | 159 | xhci_dbg(xhci, "Allocating ring at %p\n", ring); |
0ebbab37 SS |
160 | if (!ring) |
161 | return 0; | |
162 | ||
d0e96f5a | 163 | INIT_LIST_HEAD(&ring->td_list); |
0ebbab37 SS |
164 | if (num_segs == 0) |
165 | return ring; | |
166 | ||
167 | ring->first_seg = xhci_segment_alloc(xhci, flags); | |
168 | if (!ring->first_seg) | |
169 | goto fail; | |
170 | num_segs--; | |
171 | ||
172 | prev = ring->first_seg; | |
173 | while (num_segs > 0) { | |
174 | struct xhci_segment *next; | |
175 | ||
176 | next = xhci_segment_alloc(xhci, flags); | |
177 | if (!next) | |
178 | goto fail; | |
179 | xhci_link_segments(xhci, prev, next, link_trbs); | |
180 | ||
181 | prev = next; | |
182 | num_segs--; | |
183 | } | |
184 | xhci_link_segments(xhci, prev, ring->first_seg, link_trbs); | |
185 | ||
186 | if (link_trbs) { | |
187 | /* See section 4.9.2.1 and 6.4.4.1 */ | |
188 | prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE); | |
189 | xhci_dbg(xhci, "Wrote link toggle flag to" | |
700e2052 GKH |
190 | " segment %p (virtual), 0x%llx (DMA)\n", |
191 | prev, (unsigned long long)prev->dma); | |
0ebbab37 | 192 | } |
74f9fe21 | 193 | xhci_initialize_ring_info(ring); |
0ebbab37 SS |
194 | return ring; |
195 | ||
196 | fail: | |
197 | xhci_ring_free(xhci, ring); | |
198 | return 0; | |
199 | } | |
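A minimal usage sketch, assuming an xhci_hcd pointer and GFP flags are in scope; this mirrors how the one-segment endpoint 0 ring is allocated later in this file:

	struct xhci_ring *ring = xhci_ring_alloc(xhci, 1, true, mem_flags);
	if (!ring)
		return -ENOMEM;
	/* ... queue TRBs via ring->enqueue / ring->enq_seg ... */
	xhci_ring_free(xhci, ring);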
200 | ||
74f9fe21 SS |
201 | /* Zero an endpoint ring (except for link TRBs) and move the enqueue and dequeue |
202 | * pointers to the beginning of the ring. | |
203 | */ | |
204 | static void xhci_reinit_cached_ring(struct xhci_hcd *xhci, | |
205 | struct xhci_ring *ring) | |
206 | { | |
207 | struct xhci_segment *seg = ring->first_seg; | |
208 | do { | |
209 | memset(seg->trbs, 0, | |
210 | sizeof(union xhci_trb)*TRBS_PER_SEGMENT); | |
211 | /* All endpoint rings have link TRBs */ | |
212 | xhci_link_segments(xhci, seg, seg->next, 1); | |
213 | seg = seg->next; | |
214 | } while (seg != ring->first_seg); | |
215 | xhci_initialize_ring_info(ring); | |
216 | /* td list should be empty since all URBs have been cancelled, | |
217 | * but just in case... | |
218 | */ | |
219 | INIT_LIST_HEAD(&ring->td_list); | |
220 | } | |
221 | ||
d115b048 JY |
222 | #define CTX_SIZE(_hcc) (HCC_64BYTE_CONTEXT(_hcc) ? 64 : 32) |
223 | ||
224 | struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci, | |
225 | int type, gfp_t flags) | |
226 | { | |
227 | struct xhci_container_ctx *ctx = kzalloc(sizeof(*ctx), flags); | |
228 | if (!ctx) | |
229 | return NULL; | |
230 | ||
231 | BUG_ON((type != XHCI_CTX_TYPE_DEVICE) && (type != XHCI_CTX_TYPE_INPUT)); | |
232 | ctx->type = type; | |
233 | ctx->size = HCC_64BYTE_CONTEXT(xhci->hcc_params) ? 2048 : 1024; | |
234 | if (type == XHCI_CTX_TYPE_INPUT) | |
235 | ctx->size += CTX_SIZE(xhci->hcc_params); | |
236 | ||
237 | ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma); | |
238 | memset(ctx->bytes, 0, ctx->size); | |
239 | return ctx; | |
240 | } | |
241 | ||
242 | void xhci_free_container_ctx(struct xhci_hcd *xhci, | |
243 | struct xhci_container_ctx *ctx) | |
244 | { | |
245 | dma_pool_free(xhci->device_pool, ctx->bytes, ctx->dma); | |
246 | kfree(ctx); | |
247 | } | |
248 | ||
249 | struct xhci_input_control_ctx *xhci_get_input_control_ctx(struct xhci_hcd *xhci, | |
250 | struct xhci_container_ctx *ctx) | |
251 | { | |
252 | BUG_ON(ctx->type != XHCI_CTX_TYPE_INPUT); | |
253 | return (struct xhci_input_control_ctx *)ctx->bytes; | |
254 | } | |
255 | ||
256 | struct xhci_slot_ctx *xhci_get_slot_ctx(struct xhci_hcd *xhci, | |
257 | struct xhci_container_ctx *ctx) | |
258 | { | |
259 | if (ctx->type == XHCI_CTX_TYPE_DEVICE) | |
260 | return (struct xhci_slot_ctx *)ctx->bytes; | |
261 | ||
262 | return (struct xhci_slot_ctx *) | |
263 | (ctx->bytes + CTX_SIZE(xhci->hcc_params)); | |
264 | } | |
265 | ||
266 | struct xhci_ep_ctx *xhci_get_ep_ctx(struct xhci_hcd *xhci, | |
267 | struct xhci_container_ctx *ctx, | |
268 | unsigned int ep_index) | |
269 | { | |
270 | /* increment ep index by offset of start of ep ctx array */ | |
271 | ep_index++; | |
272 | if (ctx->type == XHCI_CTX_TYPE_INPUT) | |
273 | ep_index++; | |
274 | ||
275 | return (struct xhci_ep_ctx *) | |
276 | (ctx->bytes + (ep_index * CTX_SIZE(xhci->hcc_params))); | |
277 | } | |
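With 32-byte contexts, an input container is laid out as [input control ctx][slot ctx][ep 0][ep 1]..., so for ep_index 0 the helper above returns ctx->bytes + 2 * 32; a device (output) container has no control context and starts with the slot context. A short sketch, assuming in_ctx is an XHCI_CTX_TYPE_INPUT container already allocated by xhci_alloc_container_ctx():

	struct xhci_input_control_ctx *ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	struct xhci_slot_ctx *slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	struct xhci_ep_ctx *ep0_ctx = xhci_get_ep_ctx(xhci, in_ctx, 0);	/* ctx->bytes + 2 * CTX_SIZE */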
278 | ||
6f5165cf SS |
279 | static void xhci_init_endpoint_timer(struct xhci_hcd *xhci, |
280 | struct xhci_virt_ep *ep) | |
281 | { | |
282 | init_timer(&ep->stop_cmd_timer); | |
283 | ep->stop_cmd_timer.data = (unsigned long) ep; | |
284 | ep->stop_cmd_timer.function = xhci_stop_endpoint_command_watchdog; | |
285 | ep->xhci = xhci; | |
286 | } | |
287 | ||
d0e96f5a | 288 | /* All the xhci_tds in the ring's TD list should be freed at this point */ |
3ffbba95 SS |
289 | void xhci_free_virt_device(struct xhci_hcd *xhci, int slot_id) |
290 | { | |
291 | struct xhci_virt_device *dev; | |
292 | int i; | |
293 | ||
294 | /* Slot ID 0 is reserved */ | |
295 | if (slot_id == 0 || !xhci->devs[slot_id]) | |
296 | return; | |
297 | ||
298 | dev = xhci->devs[slot_id]; | |
8e595a5d | 299 | xhci->dcbaa->dev_context_ptrs[slot_id] = 0; |
3ffbba95 SS |
300 | if (!dev) |
301 | return; | |
302 | ||
303 | for (i = 0; i < 31; ++i) | |
63a0d9ab SS |
304 | if (dev->eps[i].ring) |
305 | xhci_ring_free(xhci, dev->eps[i].ring); | |
3ffbba95 | 306 | |
74f9fe21 SS |
307 | if (dev->ring_cache) { |
308 | for (i = 0; i < dev->num_rings_cached; i++) | |
309 | xhci_ring_free(xhci, dev->ring_cache[i]); | |
310 | kfree(dev->ring_cache); | |
311 | } | |
312 | ||
3ffbba95 | 313 | if (dev->in_ctx) |
d115b048 | 314 | xhci_free_container_ctx(xhci, dev->in_ctx); |
3ffbba95 | 315 | if (dev->out_ctx) |
d115b048 JY |
316 | xhci_free_container_ctx(xhci, dev->out_ctx); |
317 | ||
3ffbba95 SS |
318 | kfree(xhci->devs[slot_id]); |
319 | xhci->devs[slot_id] = 0; | |
320 | } | |
321 | ||
322 | int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id, | |
323 | struct usb_device *udev, gfp_t flags) | |
324 | { | |
3ffbba95 | 325 | struct xhci_virt_device *dev; |
63a0d9ab | 326 | int i; |
3ffbba95 SS |
327 | |
328 | /* Slot ID 0 is reserved */ | |
329 | if (slot_id == 0 || xhci->devs[slot_id]) { | |
330 | xhci_warn(xhci, "Bad Slot ID %d\n", slot_id); | |
331 | return 0; | |
332 | } | |
333 | ||
334 | xhci->devs[slot_id] = kzalloc(sizeof(*xhci->devs[slot_id]), flags); | |
335 | if (!xhci->devs[slot_id]) | |
336 | return 0; | |
337 | dev = xhci->devs[slot_id]; | |
338 | ||
d115b048 JY |
339 | /* Allocate the (output) device context that will be used in the HC. */ |
340 | dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags); | |
3ffbba95 SS |
341 | if (!dev->out_ctx) |
342 | goto fail; | |
d115b048 | 343 | |
700e2052 | 344 | xhci_dbg(xhci, "Slot %d output ctx = 0x%llx (dma)\n", slot_id, |
d115b048 | 345 | (unsigned long long)dev->out_ctx->dma); |
3ffbba95 SS |
346 | |
347 | /* Allocate the (input) device context for address device command */ | |
d115b048 | 348 | dev->in_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, flags); |
3ffbba95 SS |
349 | if (!dev->in_ctx) |
350 | goto fail; | |
d115b048 | 351 | |
700e2052 | 352 | xhci_dbg(xhci, "Slot %d input ctx = 0x%llx (dma)\n", slot_id, |
d115b048 | 353 | (unsigned long long)dev->in_ctx->dma); |
3ffbba95 | 354 | |
6f5165cf SS |
355 | /* Initialize the cancellation list and watchdog timers for each ep */ |
356 | for (i = 0; i < 31; i++) { | |
357 | xhci_init_endpoint_timer(xhci, &dev->eps[i]); | |
63a0d9ab | 358 | INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list); |
6f5165cf | 359 | } |
63a0d9ab | 360 | |
3ffbba95 | 361 | /* Allocate endpoint 0 ring */ |
63a0d9ab SS |
362 | dev->eps[0].ring = xhci_ring_alloc(xhci, 1, true, flags); |
363 | if (!dev->eps[0].ring) | |
3ffbba95 SS |
364 | goto fail; |
365 | ||
74f9fe21 SS |
366 | /* Allocate pointers to the ring cache */ |
367 | dev->ring_cache = kzalloc( | |
368 | sizeof(struct xhci_ring *)*XHCI_MAX_RINGS_CACHED, | |
369 | flags); | |
370 | if (!dev->ring_cache) | |
371 | goto fail; | |
372 | dev->num_rings_cached = 0; | |
373 | ||
f94e0186 | 374 | init_completion(&dev->cmd_completion); |
913a8a34 | 375 | INIT_LIST_HEAD(&dev->cmd_list); |
f94e0186 | 376 | |
28c2d2ef | 377 | /* Point to output device context in dcbaa. */ |
d115b048 | 378 | xhci->dcbaa->dev_context_ptrs[slot_id] = dev->out_ctx->dma; |
700e2052 | 379 | xhci_dbg(xhci, "Set slot id %d dcbaa entry %p to 0x%llx\n", |
3ffbba95 | 380 | slot_id, |
8e595a5d | 381 | &xhci->dcbaa->dev_context_ptrs[slot_id], |
28c2d2ef | 382 | (unsigned long long) xhci->dcbaa->dev_context_ptrs[slot_id]); |
3ffbba95 SS |
383 | |
384 | return 1; | |
385 | fail: | |
386 | xhci_free_virt_device(xhci, slot_id); | |
387 | return 0; | |
388 | } | |
389 | ||
390 | /* Setup an xHCI virtual device for a Set Address command */ | |
391 | int xhci_setup_addressable_virt_dev(struct xhci_hcd *xhci, struct usb_device *udev) | |
392 | { | |
393 | struct xhci_virt_device *dev; | |
394 | struct xhci_ep_ctx *ep0_ctx; | |
395 | struct usb_device *top_dev; | |
d115b048 JY |
396 | struct xhci_slot_ctx *slot_ctx; |
397 | struct xhci_input_control_ctx *ctrl_ctx; | |
3ffbba95 SS |
398 | |
399 | dev = xhci->devs[udev->slot_id]; | |
400 | /* Slot ID 0 is reserved */ | |
401 | if (udev->slot_id == 0 || !dev) { | |
402 | xhci_warn(xhci, "Slot ID %d is not assigned to this device\n", | |
403 | udev->slot_id); | |
404 | return -EINVAL; | |
405 | } | |
d115b048 JY |
406 | ep0_ctx = xhci_get_ep_ctx(xhci, dev->in_ctx, 0); |
407 | ctrl_ctx = xhci_get_input_control_ctx(xhci, dev->in_ctx); | |
408 | slot_ctx = xhci_get_slot_ctx(xhci, dev->in_ctx); | |
3ffbba95 SS |
409 | |
410 | /* 2) New slot context and endpoint 0 context are valid */ |
d115b048 | 411 | ctrl_ctx->add_flags = SLOT_FLAG | EP0_FLAG; |
3ffbba95 SS |
412 | |
413 | /* 3) Only the control endpoint is valid - one endpoint context */ | |
d115b048 | 414 | slot_ctx->dev_info |= LAST_CTX(1); |
3ffbba95 | 415 | |
4a0cd967 | 416 | slot_ctx->dev_info |= (u32) udev->route; |
3ffbba95 SS |
417 | switch (udev->speed) { |
418 | case USB_SPEED_SUPER: | |
d115b048 | 419 | slot_ctx->dev_info |= (u32) SLOT_SPEED_SS; |
3ffbba95 SS |
420 | break; |
421 | case USB_SPEED_HIGH: | |
d115b048 | 422 | slot_ctx->dev_info |= (u32) SLOT_SPEED_HS; |
3ffbba95 SS |
423 | break; |
424 | case USB_SPEED_FULL: | |
d115b048 | 425 | slot_ctx->dev_info |= (u32) SLOT_SPEED_FS; |
3ffbba95 SS |
426 | break; |
427 | case USB_SPEED_LOW: | |
d115b048 | 428 | slot_ctx->dev_info |= (u32) SLOT_SPEED_LS; |
3ffbba95 SS |
429 | break; |
430 | case USB_SPEED_VARIABLE: | |
431 | xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); | |
432 | return -EINVAL; | |
433 | break; | |
434 | default: | |
435 | /* Speed was set earlier, this shouldn't happen. */ | |
436 | BUG(); | |
437 | } | |
438 | /* Find the root hub port this device is under */ | |
439 | for (top_dev = udev; top_dev->parent && top_dev->parent->parent; | |
440 | top_dev = top_dev->parent) | |
441 | /* Found device below root hub */; | |
d115b048 | 442 | slot_ctx->dev_info2 |= (u32) ROOT_HUB_PORT(top_dev->portnum); |
3ffbba95 SS |
443 | xhci_dbg(xhci, "Set root hub portnum to %d\n", top_dev->portnum); |
444 | ||
445 | /* Is this a LS/FS device under a HS hub? */ | |
3ffbba95 SS |
446 | if ((udev->speed == USB_SPEED_LOW || udev->speed == USB_SPEED_FULL) && |
447 | udev->tt) { | |
d115b048 JY |
448 | slot_ctx->tt_info = udev->tt->hub->slot_id; |
449 | slot_ctx->tt_info |= udev->ttport << 8; | |
07b6de10 SS |
450 | if (udev->tt->multi) |
451 | slot_ctx->dev_info |= DEV_MTT; | |
3ffbba95 | 452 | } |
700e2052 | 453 | xhci_dbg(xhci, "udev->tt = %p\n", udev->tt); |
3ffbba95 SS |
454 | xhci_dbg(xhci, "udev->ttport = 0x%x\n", udev->ttport); |
455 | ||
456 | /* Step 4 - ring already allocated */ | |
457 | /* Step 5 */ | |
458 | ep0_ctx->ep_info2 = EP_TYPE(CTRL_EP); | |
459 | /* | |
3ffbba95 SS |
460 | * XXX: Not sure about wireless USB devices. |
461 | */ | |
47aded8a SS |
462 | switch (udev->speed) { |
463 | case USB_SPEED_SUPER: | |
3ffbba95 | 464 | ep0_ctx->ep_info2 |= MAX_PACKET(512); |
47aded8a SS |
465 | break; |
466 | case USB_SPEED_HIGH: | |
467 | /* USB core guesses at a 64-byte max packet first for FS devices */ | |
468 | case USB_SPEED_FULL: | |
469 | ep0_ctx->ep_info2 |= MAX_PACKET(64); | |
470 | break; | |
471 | case USB_SPEED_LOW: | |
3ffbba95 | 472 | ep0_ctx->ep_info2 |= MAX_PACKET(8); |
47aded8a SS |
473 | break; |
474 | case USB_SPEED_VARIABLE: | |
475 | xhci_dbg(xhci, "FIXME xHCI doesn't support wireless speeds\n"); | |
476 | return -EINVAL; | |
477 | break; | |
478 | default: | |
479 | /* New speed? */ | |
480 | BUG(); | |
481 | } | |
3ffbba95 SS |
482 | /* EP 0 can handle "burst" sizes of 1, so Max Burst Size field is 0 */ |
483 | ep0_ctx->ep_info2 |= MAX_BURST(0); | |
484 | ep0_ctx->ep_info2 |= ERROR_COUNT(3); | |
485 | ||
8e595a5d | 486 | ep0_ctx->deq = |
63a0d9ab SS |
487 | dev->eps[0].ring->first_seg->dma; |
488 | ep0_ctx->deq |= dev->eps[0].ring->cycle_state; | |
3ffbba95 SS |
489 | |
490 | /* Steps 7 and 8 were done in xhci_alloc_virt_device() */ | |
491 | ||
492 | return 0; | |
493 | } | |
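A worked example of the fields set above (values are illustrative): a low-speed device on port 3 of a high-speed hub that was assigned slot ID 5 gets SLOT_SPEED_LS in dev_info, the root hub port number in dev_info2, and tt_info = 5 | (3 << 8), which tells the xHC which Transaction Translator to use for split transactions.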
494 | ||
f94e0186 SS |
495 | /* Return the polling or NAK interval. |
496 | * | |
497 | * The polling interval is expressed in "microframes". If xHCI's Interval field | |
498 | * is set to N, it will service the endpoint every 2^(Interval)*125us. | |
499 | * | |
500 | * The NAK interval is one NAK per 1 to 255 microframes, or no NAKs if interval | |
501 | * is set to 0. | |
502 | */ | |
503 | static inline unsigned int xhci_get_endpoint_interval(struct usb_device *udev, | |
504 | struct usb_host_endpoint *ep) | |
505 | { | |
506 | unsigned int interval = 0; | |
507 | ||
508 | switch (udev->speed) { | |
509 | case USB_SPEED_HIGH: | |
510 | /* Max NAK rate */ | |
511 | if (usb_endpoint_xfer_control(&ep->desc) || | |
512 | usb_endpoint_xfer_bulk(&ep->desc)) | |
513 | interval = ep->desc.bInterval; | |
514 | /* Fall through - SS and HS isoc/int have same decoding */ | |
515 | case USB_SPEED_SUPER: | |
516 | if (usb_endpoint_xfer_int(&ep->desc) || | |
517 | usb_endpoint_xfer_isoc(&ep->desc)) { | |
518 | if (ep->desc.bInterval == 0) | |
519 | interval = 0; | |
520 | else | |
521 | interval = ep->desc.bInterval - 1; | |
522 | if (interval > 15) | |
523 | interval = 15; | |
524 | if (interval != ep->desc.bInterval - 1) | |
525 | dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n", | |
526 | ep->desc.bEndpointAddress, 1 << interval); | |
527 | } | |
528 | break; | |
529 | /* Convert bInterval (in 1-255 frames) to microframes and round down to | |
530 | * nearest power of 2. | |
531 | */ | |
532 | case USB_SPEED_FULL: | |
533 | case USB_SPEED_LOW: | |
534 | if (usb_endpoint_xfer_int(&ep->desc) || | |
535 | usb_endpoint_xfer_isoc(&ep->desc)) { | |
536 | interval = fls(8*ep->desc.bInterval) - 1; | |
537 | if (interval > 10) | |
538 | interval = 10; | |
539 | if (interval < 3) | |
540 | interval = 3; | |
541 | if ((1 << interval) != 8*ep->desc.bInterval) | |
542 | dev_warn(&udev->dev, "ep %#x - rounding interval to %d microframes\n", | |
543 | ep->desc.bEndpointAddress, 1 << interval); | |
544 | } | |
545 | break; | |
546 | default: | |
547 | BUG(); | |
548 | } | |
549 | return EP_INTERVAL(interval); | |
550 | } | |
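A worked example of the decoding above: a full-speed interrupt endpoint with bInterval = 10 frames asks for 10 ms service; 8 * 10 = 80 microframes and fls(80) - 1 = 6, so Interval = 6, the xHC polls every 2^6 * 125 us = 8 ms, and the rounding warning fires because (1 << 6) != 80.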
551 | ||
552 | static inline u32 xhci_get_endpoint_type(struct usb_device *udev, | |
553 | struct usb_host_endpoint *ep) | |
554 | { | |
555 | int in; | |
556 | u32 type; | |
557 | ||
558 | in = usb_endpoint_dir_in(&ep->desc); | |
559 | if (usb_endpoint_xfer_control(&ep->desc)) { | |
560 | type = EP_TYPE(CTRL_EP); | |
561 | } else if (usb_endpoint_xfer_bulk(&ep->desc)) { | |
562 | if (in) | |
563 | type = EP_TYPE(BULK_IN_EP); | |
564 | else | |
565 | type = EP_TYPE(BULK_OUT_EP); | |
566 | } else if (usb_endpoint_xfer_isoc(&ep->desc)) { | |
567 | if (in) | |
568 | type = EP_TYPE(ISOC_IN_EP); | |
569 | else | |
570 | type = EP_TYPE(ISOC_OUT_EP); | |
571 | } else if (usb_endpoint_xfer_int(&ep->desc)) { | |
572 | if (in) | |
573 | type = EP_TYPE(INT_IN_EP); | |
574 | else | |
575 | type = EP_TYPE(INT_OUT_EP); | |
576 | } else { | |
577 | BUG(); | |
578 | } | |
579 | return type; | |
580 | } | |
581 | ||
582 | int xhci_endpoint_init(struct xhci_hcd *xhci, | |
583 | struct xhci_virt_device *virt_dev, | |
584 | struct usb_device *udev, | |
f88ba78d SS |
585 | struct usb_host_endpoint *ep, |
586 | gfp_t mem_flags) | |
f94e0186 SS |
587 | { |
588 | unsigned int ep_index; | |
589 | struct xhci_ep_ctx *ep_ctx; | |
590 | struct xhci_ring *ep_ring; | |
591 | unsigned int max_packet; | |
592 | unsigned int max_burst; | |
593 | ||
594 | ep_index = xhci_get_endpoint_index(&ep->desc); | |
d115b048 | 595 | ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); |
f94e0186 SS |
596 | |
597 | /* Set up the endpoint ring */ | |
63a0d9ab SS |
598 | virt_dev->eps[ep_index].new_ring = |
599 | xhci_ring_alloc(xhci, 1, true, mem_flags); | |
74f9fe21 SS |
600 | if (!virt_dev->eps[ep_index].new_ring) { |
601 | /* Attempt to use the ring cache */ | |
602 | if (virt_dev->num_rings_cached == 0) | |
603 | return -ENOMEM; | |
604 | virt_dev->eps[ep_index].new_ring = | |
605 | virt_dev->ring_cache[virt_dev->num_rings_cached]; | |
606 | virt_dev->ring_cache[virt_dev->num_rings_cached] = NULL; | |
607 | virt_dev->num_rings_cached--; | |
608 | xhci_reinit_cached_ring(xhci, virt_dev->eps[ep_index].new_ring); | |
609 | } | |
63a0d9ab | 610 | ep_ring = virt_dev->eps[ep_index].new_ring; |
8e595a5d | 611 | ep_ctx->deq = ep_ring->first_seg->dma | ep_ring->cycle_state; |
f94e0186 SS |
612 | |
613 | ep_ctx->ep_info = xhci_get_endpoint_interval(udev, ep); | |
614 | ||
615 | /* FIXME dig Mult and streams info out of ep companion desc */ | |
616 | ||
47692d17 SS |
617 | /* Allow 3 retries for everything but isoc; |
618 | * error count = 0 means infinite retries. | |
619 | */ | |
f94e0186 SS |
620 | if (!usb_endpoint_xfer_isoc(&ep->desc)) |
621 | ep_ctx->ep_info2 = ERROR_COUNT(3); | |
622 | else | |
47692d17 | 623 | ep_ctx->ep_info2 = ERROR_COUNT(1); |
f94e0186 SS |
624 | |
625 | ep_ctx->ep_info2 |= xhci_get_endpoint_type(udev, ep); | |
626 | ||
627 | /* Set the max packet size and max burst */ | |
628 | switch (udev->speed) { | |
629 | case USB_SPEED_SUPER: | |
630 | max_packet = ep->desc.wMaxPacketSize; | |
631 | ep_ctx->ep_info2 |= MAX_PACKET(max_packet); | |
b10de142 | 632 | /* dig out max burst from ep companion desc */ |
b7d6d998 SS |
633 | if (!ep->ss_ep_comp) { |
634 | xhci_warn(xhci, "WARN no SS endpoint companion descriptor.\n"); | |
635 | max_packet = 0; | |
636 | } else { | |
637 | max_packet = ep->ss_ep_comp->desc.bMaxBurst; | |
638 | } | |
b10de142 | 639 | ep_ctx->ep_info2 |= MAX_BURST(max_packet); |
f94e0186 SS |
640 | break; |
641 | case USB_SPEED_HIGH: | |
642 | /* bits 11:12 specify the number of additional transaction | |
643 | * opportunities per microframe (USB 2.0, section 9.6.6) | |
644 | */ | |
645 | if (usb_endpoint_xfer_isoc(&ep->desc) || | |
646 | usb_endpoint_xfer_int(&ep->desc)) { | |
647 | max_burst = (ep->desc.wMaxPacketSize & 0x1800) >> 11; | |
648 | ep_ctx->ep_info2 |= MAX_BURST(max_burst); | |
649 | } | |
650 | /* Fall through */ | |
651 | case USB_SPEED_FULL: | |
652 | case USB_SPEED_LOW: | |
653 | max_packet = ep->desc.wMaxPacketSize & 0x3ff; | |
654 | ep_ctx->ep_info2 |= MAX_PACKET(max_packet); | |
655 | break; | |
656 | default: | |
657 | BUG(); | |
658 | } | |
659 | /* FIXME Debug endpoint context */ | |
660 | return 0; | |
661 | } | |
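A worked example of the high-speed decoding above: an interrupt endpoint with wMaxPacketSize = 0x0940 encodes one additional transaction opportunity per microframe (bits 12:11 = 1) and a 320-byte packet (low bits = 0x140), so the endpoint context gets MAX_BURST(1) and MAX_PACKET(320).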
662 | ||
663 | void xhci_endpoint_zero(struct xhci_hcd *xhci, | |
664 | struct xhci_virt_device *virt_dev, | |
665 | struct usb_host_endpoint *ep) | |
666 | { | |
667 | unsigned int ep_index; | |
668 | struct xhci_ep_ctx *ep_ctx; | |
669 | ||
670 | ep_index = xhci_get_endpoint_index(&ep->desc); | |
d115b048 | 671 | ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); |
f94e0186 SS |
672 | |
673 | ep_ctx->ep_info = 0; | |
674 | ep_ctx->ep_info2 = 0; | |
8e595a5d | 675 | ep_ctx->deq = 0; |
f94e0186 SS |
676 | ep_ctx->tx_info = 0; |
677 | /* Don't free the endpoint ring until the set interface or configuration | |
678 | * request succeeds. | |
679 | */ | |
680 | } | |
681 | ||
f2217e8e SS |
682 | /* Copy output xhci_ep_ctx to the input xhci_ep_ctx copy. |
683 | * Useful when you want to change one particular aspect of the endpoint and then | |
684 | * issue a configure endpoint command. | |
685 | */ | |
686 | void xhci_endpoint_copy(struct xhci_hcd *xhci, | |
913a8a34 SS |
687 | struct xhci_container_ctx *in_ctx, |
688 | struct xhci_container_ctx *out_ctx, | |
689 | unsigned int ep_index) | |
f2217e8e SS |
690 | { |
691 | struct xhci_ep_ctx *out_ep_ctx; | |
692 | struct xhci_ep_ctx *in_ep_ctx; | |
693 | ||
913a8a34 SS |
694 | out_ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index); |
695 | in_ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index); | |
f2217e8e SS |
696 | |
697 | in_ep_ctx->ep_info = out_ep_ctx->ep_info; | |
698 | in_ep_ctx->ep_info2 = out_ep_ctx->ep_info2; | |
699 | in_ep_ctx->deq = out_ep_ctx->deq; | |
700 | in_ep_ctx->tx_info = out_ep_ctx->tx_info; | |
701 | } | |
702 | ||
703 | /* Copy output xhci_slot_ctx to the input xhci_slot_ctx. | |
704 | * Useful when you want to change one particular aspect of the endpoint and then | |
705 | * issue a configure endpoint command. Only the context entries field matters, | |
706 | * but we'll copy the whole thing anyway. | |
707 | */ | |
913a8a34 SS |
708 | void xhci_slot_copy(struct xhci_hcd *xhci, |
709 | struct xhci_container_ctx *in_ctx, | |
710 | struct xhci_container_ctx *out_ctx) | |
f2217e8e SS |
711 | { |
712 | struct xhci_slot_ctx *in_slot_ctx; | |
713 | struct xhci_slot_ctx *out_slot_ctx; | |
714 | ||
913a8a34 SS |
715 | in_slot_ctx = xhci_get_slot_ctx(xhci, in_ctx); |
716 | out_slot_ctx = xhci_get_slot_ctx(xhci, out_ctx); | |
f2217e8e SS |
717 | |
718 | in_slot_ctx->dev_info = out_slot_ctx->dev_info; | |
719 | in_slot_ctx->dev_info2 = out_slot_ctx->dev_info2; | |
720 | in_slot_ctx->tt_info = out_slot_ctx->tt_info; | |
721 | in_slot_ctx->dev_state = out_slot_ctx->dev_state; | |
722 | } | |
723 | ||
254c80a3 JY |
724 | /* Set up the scratchpad buffer array and scratchpad buffers, if needed. */ |
725 | static int scratchpad_alloc(struct xhci_hcd *xhci, gfp_t flags) | |
726 | { | |
727 | int i; | |
728 | struct device *dev = xhci_to_hcd(xhci)->self.controller; | |
729 | int num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); | |
730 | ||
731 | xhci_dbg(xhci, "Allocating %d scratchpad buffers\n", num_sp); | |
732 | ||
733 | if (!num_sp) | |
734 | return 0; | |
735 | ||
736 | xhci->scratchpad = kzalloc(sizeof(*xhci->scratchpad), flags); | |
737 | if (!xhci->scratchpad) | |
738 | goto fail_sp; | |
739 | ||
740 | xhci->scratchpad->sp_array = | |
741 | pci_alloc_consistent(to_pci_dev(dev), | |
742 | num_sp * sizeof(u64), | |
743 | &xhci->scratchpad->sp_dma); | |
744 | if (!xhci->scratchpad->sp_array) | |
745 | goto fail_sp2; | |
746 | ||
747 | xhci->scratchpad->sp_buffers = kzalloc(sizeof(void *) * num_sp, flags); | |
748 | if (!xhci->scratchpad->sp_buffers) | |
749 | goto fail_sp3; | |
750 | ||
751 | xhci->scratchpad->sp_dma_buffers = | |
752 | kzalloc(sizeof(dma_addr_t) * num_sp, flags); | |
753 | ||
754 | if (!xhci->scratchpad->sp_dma_buffers) | |
755 | goto fail_sp4; | |
756 | ||
757 | xhci->dcbaa->dev_context_ptrs[0] = xhci->scratchpad->sp_dma; | |
758 | for (i = 0; i < num_sp; i++) { | |
759 | dma_addr_t dma; | |
760 | void *buf = pci_alloc_consistent(to_pci_dev(dev), | |
761 | xhci->page_size, &dma); | |
762 | if (!buf) | |
763 | goto fail_sp5; | |
764 | ||
765 | xhci->scratchpad->sp_array[i] = dma; | |
766 | xhci->scratchpad->sp_buffers[i] = buf; | |
767 | xhci->scratchpad->sp_dma_buffers[i] = dma; | |
768 | } | |
769 | ||
770 | return 0; | |
771 | ||
772 | fail_sp5: | |
773 | for (i = i - 1; i >= 0; i--) { | |
774 | pci_free_consistent(to_pci_dev(dev), xhci->page_size, | |
775 | xhci->scratchpad->sp_buffers[i], | |
776 | xhci->scratchpad->sp_dma_buffers[i]); | |
777 | } | |
778 | kfree(xhci->scratchpad->sp_dma_buffers); | |
779 | ||
780 | fail_sp4: | |
781 | kfree(xhci->scratchpad->sp_buffers); | |
782 | ||
783 | fail_sp3: | |
784 | pci_free_consistent(to_pci_dev(dev), num_sp * sizeof(u64), | |
785 | xhci->scratchpad->sp_array, | |
786 | xhci->scratchpad->sp_dma); | |
787 | ||
788 | fail_sp2: | |
789 | kfree(xhci->scratchpad); | |
790 | xhci->scratchpad = NULL; | |
791 | ||
792 | fail_sp: | |
793 | return -ENOMEM; | |
794 | } | |
795 | ||
796 | static void scratchpad_free(struct xhci_hcd *xhci) | |
797 | { | |
798 | int num_sp; | |
799 | int i; | |
800 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); | |
801 | ||
802 | if (!xhci->scratchpad) | |
803 | return; | |
804 | ||
805 | num_sp = HCS_MAX_SCRATCHPAD(xhci->hcs_params2); | |
806 | ||
807 | for (i = 0; i < num_sp; i++) { | |
808 | pci_free_consistent(pdev, xhci->page_size, | |
809 | xhci->scratchpad->sp_buffers[i], | |
810 | xhci->scratchpad->sp_dma_buffers[i]); | |
811 | } | |
812 | kfree(xhci->scratchpad->sp_dma_buffers); | |
813 | kfree(xhci->scratchpad->sp_buffers); | |
814 | pci_free_consistent(pdev, num_sp * sizeof(u64), | |
815 | xhci->scratchpad->sp_array, | |
816 | xhci->scratchpad->sp_dma); | |
817 | kfree(xhci->scratchpad); | |
818 | xhci->scratchpad = NULL; | |
819 | } | |
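To summarize the layout built above: dcbaa->dev_context_ptrs[0] points at sp_array, a DMA-visible array of num_sp 64-bit buffer addresses, and each entry points at one page-sized buffer that only the controller writes to; sp_buffers[] and sp_dma_buffers[] exist purely so scratchpad_free() can hand the pages back.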
820 | ||
913a8a34 SS |
821 | struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci, |
822 | bool allocate_completion, gfp_t mem_flags) | |
823 | { | |
824 | struct xhci_command *command; | |
825 | ||
826 | command = kzalloc(sizeof(*command), mem_flags); | |
827 | if (!command) | |
828 | return NULL; | |
829 | ||
830 | command->in_ctx = | |
831 | xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_INPUT, mem_flags); | |
06e18291 JL |
832 | if (!command->in_ctx) { |
833 | kfree(command); | |
913a8a34 | 834 | return NULL; |
06e18291 | 835 | } |
913a8a34 SS |
836 | |
837 | if (allocate_completion) { | |
838 | command->completion = | |
839 | kzalloc(sizeof(struct completion), mem_flags); | |
840 | if (!command->completion) { | |
841 | xhci_free_container_ctx(xhci, command->in_ctx); | |
06e18291 | 842 | kfree(command); |
913a8a34 SS |
843 | return NULL; |
844 | } | |
845 | init_completion(command->completion); | |
846 | } | |
847 | ||
848 | command->status = 0; | |
849 | INIT_LIST_HEAD(&command->cmd_list); | |
850 | return command; | |
851 | } | |
852 | ||
853 | void xhci_free_command(struct xhci_hcd *xhci, | |
854 | struct xhci_command *command) | |
855 | { | |
856 | xhci_free_container_ctx(xhci, | |
857 | command->in_ctx); | |
858 | kfree(command->completion); | |
859 | kfree(command); | |
860 | } | |
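A minimal sketch of the intended calling pattern, assuming mem_flags is in scope and the command is queued and completed by code outside this file:

	struct xhci_command *command = xhci_alloc_command(xhci, true, mem_flags);
	if (!command)
		return -ENOMEM;
	/* ... fill command->in_ctx, queue the command, ring the host doorbell ... */
	wait_for_completion(command->completion);
	/* the command completion handler is expected to have filled command->status */
	xhci_free_command(xhci, command);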
861 | ||
66d4eadd SS |
862 | void xhci_mem_cleanup(struct xhci_hcd *xhci) |
863 | { | |
0ebbab37 SS |
864 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); |
865 | int size; | |
3ffbba95 | 866 | int i; |
0ebbab37 SS |
867 | |
868 | /* Free the Event Ring Segment Table and the actual Event Ring */ | |
d94c05e3 SS |
869 | if (xhci->ir_set) { |
870 | xhci_writel(xhci, 0, &xhci->ir_set->erst_size); | |
871 | xhci_write_64(xhci, 0, &xhci->ir_set->erst_base); | |
872 | xhci_write_64(xhci, 0, &xhci->ir_set->erst_dequeue); | |
873 | } | |
0ebbab37 SS |
874 | size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); |
875 | if (xhci->erst.entries) | |
876 | pci_free_consistent(pdev, size, | |
877 | xhci->erst.entries, xhci->erst.erst_dma_addr); | |
878 | xhci->erst.entries = NULL; | |
879 | xhci_dbg(xhci, "Freed ERST\n"); | |
880 | if (xhci->event_ring) | |
881 | xhci_ring_free(xhci, xhci->event_ring); | |
882 | xhci->event_ring = NULL; | |
883 | xhci_dbg(xhci, "Freed event ring\n"); | |
884 | ||
8e595a5d | 885 | xhci_write_64(xhci, 0, &xhci->op_regs->cmd_ring); |
0ebbab37 SS |
886 | if (xhci->cmd_ring) |
887 | xhci_ring_free(xhci, xhci->cmd_ring); | |
888 | xhci->cmd_ring = NULL; | |
889 | xhci_dbg(xhci, "Freed command ring\n"); | |
3ffbba95 SS |
890 | |
891 | for (i = 1; i < MAX_HC_SLOTS; ++i) | |
892 | xhci_free_virt_device(xhci, i); | |
893 | ||
0ebbab37 SS |
894 | if (xhci->segment_pool) |
895 | dma_pool_destroy(xhci->segment_pool); | |
896 | xhci->segment_pool = NULL; | |
897 | xhci_dbg(xhci, "Freed segment pool\n"); | |
3ffbba95 SS |
898 | |
899 | if (xhci->device_pool) | |
900 | dma_pool_destroy(xhci->device_pool); | |
901 | xhci->device_pool = NULL; | |
902 | xhci_dbg(xhci, "Freed device context pool\n"); | |
903 | ||
8e595a5d | 904 | xhci_write_64(xhci, 0, &xhci->op_regs->dcbaa_ptr); |
a74588f9 SS |
905 | if (xhci->dcbaa) |
906 | pci_free_consistent(pdev, sizeof(*xhci->dcbaa), | |
907 | xhci->dcbaa, xhci->dcbaa->dma); | |
908 | xhci->dcbaa = NULL; | |
3ffbba95 | 909 | |
5294bea4 | 910 | scratchpad_free(xhci); |
66d4eadd SS |
911 | xhci->page_size = 0; |
912 | xhci->page_shift = 0; | |
913 | } | |
914 | ||
6648f29d SS |
915 | static int xhci_test_trb_in_td(struct xhci_hcd *xhci, |
916 | struct xhci_segment *input_seg, | |
917 | union xhci_trb *start_trb, | |
918 | union xhci_trb *end_trb, | |
919 | dma_addr_t input_dma, | |
920 | struct xhci_segment *result_seg, | |
921 | char *test_name, int test_number) | |
922 | { | |
923 | unsigned long long start_dma; | |
924 | unsigned long long end_dma; | |
925 | struct xhci_segment *seg; | |
926 | ||
927 | start_dma = xhci_trb_virt_to_dma(input_seg, start_trb); | |
928 | end_dma = xhci_trb_virt_to_dma(input_seg, end_trb); | |
929 | ||
930 | seg = trb_in_td(input_seg, start_trb, end_trb, input_dma); | |
931 | if (seg != result_seg) { | |
932 | xhci_warn(xhci, "WARN: %s TRB math test %d failed!\n", | |
933 | test_name, test_number); | |
934 | xhci_warn(xhci, "Tested TRB math w/ seg %p and " | |
935 | "input DMA 0x%llx\n", | |
936 | input_seg, | |
937 | (unsigned long long) input_dma); | |
938 | xhci_warn(xhci, "starting TRB %p (0x%llx DMA), " | |
939 | "ending TRB %p (0x%llx DMA)\n", | |
940 | start_trb, start_dma, | |
941 | end_trb, end_dma); | |
942 | xhci_warn(xhci, "Expected seg %p, got seg %p\n", | |
943 | result_seg, seg); | |
944 | return -1; | |
945 | } | |
946 | return 0; | |
947 | } | |
948 | ||
949 | /* TRB math checks for xhci_trb_in_td(), using the command and event rings. */ | |
950 | static int xhci_check_trb_in_td_math(struct xhci_hcd *xhci, gfp_t mem_flags) | |
951 | { | |
952 | struct { | |
953 | dma_addr_t input_dma; | |
954 | struct xhci_segment *result_seg; | |
955 | } simple_test_vector [] = { | |
956 | /* A zeroed DMA field should fail */ | |
957 | { 0, NULL }, | |
958 | /* One TRB before the ring start should fail */ | |
959 | { xhci->event_ring->first_seg->dma - 16, NULL }, | |
960 | /* One byte before the ring start should fail */ | |
961 | { xhci->event_ring->first_seg->dma - 1, NULL }, | |
962 | /* Starting TRB should succeed */ | |
963 | { xhci->event_ring->first_seg->dma, xhci->event_ring->first_seg }, | |
964 | /* Ending TRB should succeed */ | |
965 | { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16, | |
966 | xhci->event_ring->first_seg }, | |
967 | /* One byte after the ring end should fail */ | |
968 | { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 + 1, NULL }, | |
969 | /* One TRB after the ring end should fail */ | |
970 | { xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT)*16, NULL }, | |
971 | /* An address of all ones should fail */ | |
972 | { (dma_addr_t) (~0), NULL }, | |
973 | }; | |
974 | struct { | |
975 | struct xhci_segment *input_seg; | |
976 | union xhci_trb *start_trb; | |
977 | union xhci_trb *end_trb; | |
978 | dma_addr_t input_dma; | |
979 | struct xhci_segment *result_seg; | |
980 | } complex_test_vector [] = { | |
981 | /* Test feeding a valid DMA address from a different ring */ | |
982 | { .input_seg = xhci->event_ring->first_seg, | |
983 | .start_trb = xhci->event_ring->first_seg->trbs, | |
984 | .end_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], | |
985 | .input_dma = xhci->cmd_ring->first_seg->dma, | |
986 | .result_seg = NULL, | |
987 | }, | |
988 | /* Test feeding a valid end TRB from a different ring */ | |
989 | { .input_seg = xhci->event_ring->first_seg, | |
990 | .start_trb = xhci->event_ring->first_seg->trbs, | |
991 | .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], | |
992 | .input_dma = xhci->cmd_ring->first_seg->dma, | |
993 | .result_seg = NULL, | |
994 | }, | |
995 | /* Test feeding a valid start and end TRB from a different ring */ | |
996 | { .input_seg = xhci->event_ring->first_seg, | |
997 | .start_trb = xhci->cmd_ring->first_seg->trbs, | |
998 | .end_trb = &xhci->cmd_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], | |
999 | .input_dma = xhci->cmd_ring->first_seg->dma, | |
1000 | .result_seg = NULL, | |
1001 | }, | |
1002 | /* TRB in this ring, but after this TD */ | |
1003 | { .input_seg = xhci->event_ring->first_seg, | |
1004 | .start_trb = &xhci->event_ring->first_seg->trbs[0], | |
1005 | .end_trb = &xhci->event_ring->first_seg->trbs[3], | |
1006 | .input_dma = xhci->event_ring->first_seg->dma + 4*16, | |
1007 | .result_seg = NULL, | |
1008 | }, | |
1009 | /* TRB in this ring, but before this TD */ | |
1010 | { .input_seg = xhci->event_ring->first_seg, | |
1011 | .start_trb = &xhci->event_ring->first_seg->trbs[3], | |
1012 | .end_trb = &xhci->event_ring->first_seg->trbs[6], | |
1013 | .input_dma = xhci->event_ring->first_seg->dma + 2*16, | |
1014 | .result_seg = NULL, | |
1015 | }, | |
1016 | /* TRB in this ring, but after this wrapped TD */ | |
1017 | { .input_seg = xhci->event_ring->first_seg, | |
1018 | .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], | |
1019 | .end_trb = &xhci->event_ring->first_seg->trbs[1], | |
1020 | .input_dma = xhci->event_ring->first_seg->dma + 2*16, | |
1021 | .result_seg = NULL, | |
1022 | }, | |
1023 | /* TRB in this ring, but before this wrapped TD */ | |
1024 | { .input_seg = xhci->event_ring->first_seg, | |
1025 | .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], | |
1026 | .end_trb = &xhci->event_ring->first_seg->trbs[1], | |
1027 | .input_dma = xhci->event_ring->first_seg->dma + (TRBS_PER_SEGMENT - 4)*16, | |
1028 | .result_seg = NULL, | |
1029 | }, | |
1030 | /* TRB not in this ring, and we have a wrapped TD */ | |
1031 | { .input_seg = xhci->event_ring->first_seg, | |
1032 | .start_trb = &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 3], | |
1033 | .end_trb = &xhci->event_ring->first_seg->trbs[1], | |
1034 | .input_dma = xhci->cmd_ring->first_seg->dma + 2*16, | |
1035 | .result_seg = NULL, | |
1036 | }, | |
1037 | }; | |
1038 | ||
1039 | unsigned int num_tests; | |
1040 | int i, ret; | |
1041 | ||
1042 | num_tests = sizeof(simple_test_vector) / sizeof(simple_test_vector[0]); | |
1043 | for (i = 0; i < num_tests; i++) { | |
1044 | ret = xhci_test_trb_in_td(xhci, | |
1045 | xhci->event_ring->first_seg, | |
1046 | xhci->event_ring->first_seg->trbs, | |
1047 | &xhci->event_ring->first_seg->trbs[TRBS_PER_SEGMENT - 1], | |
1048 | simple_test_vector[i].input_dma, | |
1049 | simple_test_vector[i].result_seg, | |
1050 | "Simple", i); | |
1051 | if (ret < 0) | |
1052 | return ret; | |
1053 | } | |
1054 | ||
1055 | num_tests = sizeof(complex_test_vector) / sizeof(complex_test_vector[0]); | |
1056 | for (i = 0; i < num_tests; i++) { | |
1057 | ret = xhci_test_trb_in_td(xhci, | |
1058 | complex_test_vector[i].input_seg, | |
1059 | complex_test_vector[i].start_trb, | |
1060 | complex_test_vector[i].end_trb, | |
1061 | complex_test_vector[i].input_dma, | |
1062 | complex_test_vector[i].result_seg, | |
1063 | "Complex", i); | |
1064 | if (ret < 0) | |
1065 | return ret; | |
1066 | } | |
1067 | xhci_dbg(xhci, "TRB math tests passed.\n"); | |
1068 | return 0; | |
1069 | } | |
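The test vectors above rely on each TRB being 16 bytes, so first_seg->dma + (TRBS_PER_SEGMENT - 1)*16 is the DMA address of the last TRB in a segment, and an address one byte or one TRB outside the segment must make trb_in_td() return NULL.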
1070 | ||
1071 | ||
66d4eadd SS |
1072 | int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) |
1073 | { | |
0ebbab37 SS |
1074 | dma_addr_t dma; |
1075 | struct device *dev = xhci_to_hcd(xhci)->self.controller; | |
66d4eadd | 1076 | unsigned int val, val2; |
8e595a5d | 1077 | u64 val_64; |
0ebbab37 | 1078 | struct xhci_segment *seg; |
66d4eadd SS |
1079 | u32 page_size; |
1080 | int i; | |
1081 | ||
1082 | page_size = xhci_readl(xhci, &xhci->op_regs->page_size); | |
1083 | xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size); | |
1084 | for (i = 0; i < 16; i++) { | |
1085 | if ((0x1 & page_size) != 0) | |
1086 | break; | |
1087 | page_size = page_size >> 1; | |
1088 | } | |
1089 | if (i < 16) | |
1090 | xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024); | |
1091 | else | |
1092 | xhci_warn(xhci, "WARN: no supported page size\n"); | |
1093 | /* Use 4K pages, since that's common and the minimum the HC supports */ | |
1094 | xhci->page_shift = 12; | |
1095 | xhci->page_size = 1 << xhci->page_shift; | |
1096 | xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024); | |
1097 | ||
1098 | /* | |
1099 | * Program the Number of Device Slots Enabled field in the CONFIG | |
1100 | * register with the max value of slots the HC can handle. | |
1101 | */ | |
1102 | val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1)); | |
1103 | xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n", | |
1104 | (unsigned int) val); | |
1105 | val2 = xhci_readl(xhci, &xhci->op_regs->config_reg); | |
1106 | val |= (val2 & ~HCS_SLOTS_MASK); | |
1107 | xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n", | |
1108 | (unsigned int) val); | |
1109 | xhci_writel(xhci, val, &xhci->op_regs->config_reg); | |
1110 | ||
a74588f9 SS |
1111 | /* |
1112 | * Section 6.1 - the Device Context Base Address Array must be | |
1113 | * "physically contiguous and 64-byte (cache line) aligned". | |
1114 | */ | |
1115 | xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev), | |
1116 | sizeof(*xhci->dcbaa), &dma); | |
1117 | if (!xhci->dcbaa) | |
1118 | goto fail; | |
1119 | memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa)); | |
1120 | xhci->dcbaa->dma = dma; | |
700e2052 GKH |
1121 | xhci_dbg(xhci, "// Device context base array address = 0x%llx (DMA), %p (virt)\n", |
1122 | (unsigned long long)xhci->dcbaa->dma, xhci->dcbaa); | |
8e595a5d | 1123 | xhci_write_64(xhci, dma, &xhci->op_regs->dcbaa_ptr); |
a74588f9 | 1124 | |
0ebbab37 SS |
1125 | /* |
1126 | * Initialize the ring segment pool. The ring must be a contiguous | |
1127 | * structure comprised of TRBs. The TRBs must be 16 byte aligned, | |
1128 | * however, the command ring segment needs 64-byte aligned segments, | |
1129 | * so we pick the greater alignment need. | |
1130 | */ | |
1131 | xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, | |
1132 | SEGMENT_SIZE, 64, xhci->page_size); | |
d115b048 | 1133 | |
3ffbba95 | 1134 | /* See Table 46 and Note on Figure 55 */ |
3ffbba95 | 1135 | xhci->device_pool = dma_pool_create("xHCI input/output contexts", dev, |
d115b048 | 1136 | 2112, 64, xhci->page_size); |
3ffbba95 | 1137 | if (!xhci->segment_pool || !xhci->device_pool) |
0ebbab37 SS |
1138 | goto fail; |
1139 | ||
1140 | /* Set up the command ring to have one segment for now. */ | |
1141 | xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags); | |
1142 | if (!xhci->cmd_ring) | |
1143 | goto fail; | |
700e2052 GKH |
1144 | xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring); |
1145 | xhci_dbg(xhci, "First segment DMA is 0x%llx\n", | |
1146 | (unsigned long long)xhci->cmd_ring->first_seg->dma); | |
0ebbab37 SS |
1147 | |
1148 | /* Set the address in the Command Ring Control register */ | |
8e595a5d SS |
1149 | val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); |
1150 | val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) | | |
1151 | (xhci->cmd_ring->first_seg->dma & (u64) ~CMD_RING_RSVD_BITS) | | |
0ebbab37 | 1152 | xhci->cmd_ring->cycle_state; |
8e595a5d SS |
1153 | xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n", (unsigned long long) val_64); |
1154 | xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring); | |
0ebbab37 SS |
1155 | xhci_dbg_cmd_ptrs(xhci); |
1156 | ||
1157 | val = xhci_readl(xhci, &xhci->cap_regs->db_off); | |
1158 | val &= DBOFF_MASK; | |
1159 | xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x" | |
1160 | " from cap regs base addr\n", val); | |
1161 | xhci->dba = (void *) xhci->cap_regs + val; | |
1162 | xhci_dbg_regs(xhci); | |
1163 | xhci_print_run_regs(xhci); | |
1164 | /* Set ir_set to interrupt register set 0 */ | |
1165 | xhci->ir_set = (void *) xhci->run_regs->ir_set; | |
1166 | ||
1167 | /* | |
1168 | * Event ring setup: Allocate a normal ring, but also setup | |
1169 | * the event ring segment table (ERST). Section 4.9.3. | |
1170 | */ | |
1171 | xhci_dbg(xhci, "// Allocating event ring\n"); | |
1172 | xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags); | |
1173 | if (!xhci->event_ring) | |
1174 | goto fail; | |
6648f29d SS |
1175 | if (xhci_check_trb_in_td_math(xhci, flags) < 0) |
1176 | goto fail; | |
0ebbab37 SS |
1177 | |
1178 | xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev), | |
1179 | sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma); | |
1180 | if (!xhci->erst.entries) | |
1181 | goto fail; | |
700e2052 GKH |
1182 | xhci_dbg(xhci, "// Allocated event ring segment table at 0x%llx\n", |
1183 | (unsigned long long)dma); | |
0ebbab37 SS |
1184 | |
1185 | memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS); | |
1186 | xhci->erst.num_entries = ERST_NUM_SEGS; | |
1187 | xhci->erst.erst_dma_addr = dma; | |
700e2052 | 1188 | xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = %p, dma addr = 0x%llx\n", |
0ebbab37 | 1189 | xhci->erst.num_entries, |
700e2052 GKH |
1190 | xhci->erst.entries, |
1191 | (unsigned long long)xhci->erst.erst_dma_addr); | |
0ebbab37 SS |
1192 | |
1193 | /* set ring base address and size for each segment table entry */ | |
1194 | for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) { | |
1195 | struct xhci_erst_entry *entry = &xhci->erst.entries[val]; | |
8e595a5d | 1196 | entry->seg_addr = seg->dma; |
0ebbab37 SS |
1197 | entry->seg_size = TRBS_PER_SEGMENT; |
1198 | entry->rsvd = 0; | |
1199 | seg = seg->next; | |
1200 | } | |
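	/*
	 * Each entry written above follows the xHCI Event Ring Segment Table
	 * entry layout: a 64-bit ring segment base DMA address plus the ring
	 * segment size in TRBs (TRBS_PER_SEGMENT), with the rest reserved.
	 */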
1201 | ||
1202 | /* set ERST count with the number of entries in the segment table */ | |
1203 | val = xhci_readl(xhci, &xhci->ir_set->erst_size); | |
1204 | val &= ERST_SIZE_MASK; | |
1205 | val |= ERST_NUM_SEGS; | |
1206 | xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n", | |
1207 | val); | |
1208 | xhci_writel(xhci, val, &xhci->ir_set->erst_size); | |
1209 | ||
1210 | xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n"); | |
1211 | /* set the segment table base address */ | |
700e2052 GKH |
1212 | xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%llx\n", |
1213 | (unsigned long long)xhci->erst.erst_dma_addr); | |
8e595a5d SS |
1214 | val_64 = xhci_read_64(xhci, &xhci->ir_set->erst_base); |
1215 | val_64 &= ERST_PTR_MASK; | |
1216 | val_64 |= (xhci->erst.erst_dma_addr & (u64) ~ERST_PTR_MASK); | |
1217 | xhci_write_64(xhci, val_64, &xhci->ir_set->erst_base); | |
0ebbab37 SS |
1218 | |
1219 | /* Set the event ring dequeue address */ | |
23e3be11 | 1220 | xhci_set_hc_event_deq(xhci); |
0ebbab37 SS |
1221 | xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n"); |
1222 | xhci_print_ir_set(xhci, xhci->ir_set, 0); | |
1223 | ||
1224 | /* | |
1225 | * XXX: Might need to set the Interrupter Moderation Register to | |
1226 | * something other than the default (~1ms minimum between interrupts). | |
1227 | * See section 5.5.1.2. | |
1228 | */ | |
3ffbba95 SS |
1229 | init_completion(&xhci->addr_dev); |
1230 | for (i = 0; i < MAX_HC_SLOTS; ++i) | |
1231 | xhci->devs[i] = 0; | |
66d4eadd | 1232 | |
254c80a3 JY |
1233 | if (scratchpad_alloc(xhci, flags)) |
1234 | goto fail; | |
1235 | ||
66d4eadd | 1236 | return 0; |
254c80a3 | 1237 | |
66d4eadd SS |
1238 | fail: |
1239 | xhci_warn(xhci, "Couldn't initialize memory\n"); | |
1240 | xhci_mem_cleanup(xhci); | |
1241 | return -ENOMEM; | |
1242 | } |