Commit | Line | Data |
---|---|---|
66d4eadd SS |
1 | /* |
2 | * xHCI host controller driver | |
3 | * | |
4 | * Copyright (C) 2008 Intel Corp. | |
5 | * | |
6 | * Author: Sarah Sharp | |
7 | * Some code borrowed from the Linux EHCI driver. | |
8 | * | |
9 | * This program is free software; you can redistribute it and/or modify | |
10 | * it under the terms of the GNU General Public License version 2 as | |
11 | * published by the Free Software Foundation. | |
12 | * | |
13 | * This program is distributed in the hope that it will be useful, but | |
14 | * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY | |
15 | * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License | |
16 | * for more details. | |
17 | * | |
18 | * You should have received a copy of the GNU General Public License | |
19 | * along with this program; if not, write to the Free Software Foundation, | |
20 | * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. | |
21 | */ | |
22 | ||
23 | #include <linux/usb.h> | |
0ebbab37 | 24 | #include <linux/pci.h> |
66d4eadd SS |
25 | |
26 | #include "xhci.h" | |
27 | ||
0ebbab37 SS |
28 | /* |
29 | * Allocates a generic ring segment from the ring pool, sets the dma address, | |
30 | * initializes the segment to zero, and sets the private next pointer to NULL. | |
31 | * | |
32 | * Section 4.11.1.1: | |
33 | * "All components of all Command and Transfer TRBs shall be initialized to '0'" | |
34 | */ | |
35 | static struct xhci_segment *xhci_segment_alloc(struct xhci_hcd *xhci, gfp_t flags) | |
36 | { | |
37 | struct xhci_segment *seg; | |
38 | dma_addr_t dma; | |
39 | ||
40 | seg = kzalloc(sizeof *seg, flags); | |
41 | if (!seg) | |
42 | return NULL; |
43 | xhci_dbg(xhci, "Allocating priv segment structure at 0x%x\n", | |
44 | (unsigned int) seg); | |
45 | ||
46 | seg->trbs = dma_pool_alloc(xhci->segment_pool, flags, &dma); | |
47 | if (!seg->trbs) { | |
48 | kfree(seg); | |
49 | return NULL; |
50 | } | |
51 | xhci_dbg(xhci, "// Allocating segment at 0x%x (virtual) 0x%x (DMA)\n", | |
52 | (unsigned int) seg->trbs, (u32) dma); | |
53 | ||
54 | memset(seg->trbs, 0, SEGMENT_SIZE); | |
55 | seg->dma = dma; | |
56 | seg->next = NULL; | |
57 | ||
58 | return seg; | |
59 | } | |
60 | ||
61 | static void xhci_segment_free(struct xhci_hcd *xhci, struct xhci_segment *seg) | |
62 | { | |
63 | if (!seg) | |
64 | return; | |
65 | if (seg->trbs) { | |
66 | xhci_dbg(xhci, "Freeing DMA segment at 0x%x" | |
67 | " (virtual) 0x%x (DMA)\n", | |
68 | (unsigned int) seg->trbs, (u32) seg->dma); | |
69 | dma_pool_free(xhci->segment_pool, seg->trbs, seg->dma); | |
70 | seg->trbs = NULL; | |
71 | } | |
72 | xhci_dbg(xhci, "Freeing priv segment structure at 0x%x\n", | |
73 | (unsigned int) seg); | |
74 | kfree(seg); | |
75 | } | |
76 | ||
77 | /* | |
78 | * Make the prev segment point to the next segment. | |
79 | * | |
80 | * Change the last TRB in the prev segment to be a Link TRB which points to the | |
81 | * DMA address of the next segment. The caller needs to set any Link TRB | |
82 | * related flags, such as End TRB, Toggle Cycle, and no snoop. | |
83 | */ | |
84 | static void xhci_link_segments(struct xhci_hcd *xhci, struct xhci_segment *prev, | |
85 | struct xhci_segment *next, bool link_trbs) | |
86 | { | |
87 | u32 val; | |
88 | ||
89 | if (!prev || !next) | |
90 | return; | |
91 | prev->next = next; | |
92 | if (link_trbs) { | |
93 | prev->trbs[TRBS_PER_SEGMENT-1].link.segment_ptr[0] = next->dma; | |
94 | ||
95 | /* Set the last TRB in the segment to have a TRB type ID of Link TRB */ | |
96 | val = prev->trbs[TRBS_PER_SEGMENT-1].link.control; | |
97 | val &= ~TRB_TYPE_BITMASK; | |
98 | val |= TRB_TYPE(TRB_LINK); | |
99 | prev->trbs[TRBS_PER_SEGMENT-1].link.control = val; | |
100 | } | |
101 | xhci_dbg(xhci, "Linking segment 0x%x to segment 0x%x (DMA)\n", | |
102 | (u32) prev->dma, (u32) next->dma); |
103 | } | |
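/*
 * Illustration only (not taken from the spec): after two segments have been
 * linked with link_trbs == true, the ring looks roughly like
 *
 *	seg A: [TRB 0] ... [TRB N-2] [Link TRB -> seg B]
 *	seg B: [TRB 0] ... [TRB N-2] [Link TRB -> seg A]
 *
 * where N is TRBS_PER_SEGMENT.  The Toggle Cycle bit on the final Link TRB
 * is set by the caller (see xhci_ring_alloc() below).
 */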
104 | ||
105 | /* XXX: Do we need the hcd structure in all these functions? */ | |
106 | static void xhci_ring_free(struct xhci_hcd *xhci, struct xhci_ring *ring) | |
107 | { | |
108 | struct xhci_segment *seg; | |
109 | struct xhci_segment *first_seg; | |
110 | ||
111 | if (!ring || !ring->first_seg) | |
112 | return; | |
113 | first_seg = ring->first_seg; | |
114 | seg = first_seg->next; | |
115 | xhci_dbg(xhci, "Freeing ring at 0x%x\n", (unsigned int) ring); | |
116 | while (seg != first_seg) { | |
117 | struct xhci_segment *next = seg->next; | |
118 | xhci_segment_free(xhci, seg); | |
119 | seg = next; | |
120 | } | |
121 | xhci_segment_free(xhci, first_seg); | |
122 | ring->first_seg = NULL; | |
123 | kfree(ring); | |
124 | } | |
125 | ||
126 | /* |
127 | * Create a new ring with zero or more segments. | |
128 | * | |
129 | * Link each segment together into a ring. | |
130 | * Set the end flag and the cycle toggle bit on the last segment. | |
131 | * See section 4.9.1 and figures 15 and 16. | |
132 | */ | |
133 | static struct xhci_ring *xhci_ring_alloc(struct xhci_hcd *xhci, | |
134 | unsigned int num_segs, bool link_trbs, gfp_t flags) | |
135 | { | |
136 | struct xhci_ring *ring; | |
137 | struct xhci_segment *prev; | |
138 | ||
139 | ring = kzalloc(sizeof *(ring), flags); | |
140 | xhci_dbg(xhci, "Allocating ring at 0x%x\n", (unsigned int) ring); | |
141 | if (!ring) | |
142 | return NULL; |
143 | ||
144 | if (num_segs == 0) | |
145 | return ring; | |
146 | ||
147 | ring->first_seg = xhci_segment_alloc(xhci, flags); | |
148 | if (!ring->first_seg) | |
149 | goto fail; | |
150 | num_segs--; | |
151 | ||
152 | prev = ring->first_seg; | |
153 | while (num_segs > 0) { | |
154 | struct xhci_segment *next; | |
155 | ||
156 | next = xhci_segment_alloc(xhci, flags); | |
157 | if (!next) | |
158 | goto fail; | |
159 | xhci_link_segments(xhci, prev, next, link_trbs); | |
160 | ||
161 | prev = next; | |
162 | num_segs--; | |
163 | } | |
164 | xhci_link_segments(xhci, prev, ring->first_seg, link_trbs); | |
165 | ||
166 | if (link_trbs) { | |
167 | /* See section 4.9.2.1 and 6.4.4.1 */ | |
168 | prev->trbs[TRBS_PER_SEGMENT-1].link.control |= (LINK_TOGGLE); | |
169 | xhci_dbg(xhci, "Wrote link toggle flag to" | |
170 | " segment 0x%x (virtual), 0x%x (DMA)\n", | |
171 | (unsigned int) prev, (u32) prev->dma); | |
172 | } | |
173 | /* The ring is empty, so the enqueue pointer == dequeue pointer */ | |
174 | ring->enqueue = ring->first_seg->trbs; | |
7f84eef0 | 175 | ring->enq_seg = ring->first_seg; |
0ebbab37 | 176 | ring->dequeue = ring->enqueue; |
7f84eef0 | 177 | ring->deq_seg = ring->first_seg; |
0ebbab37 SS |
178 | /* The ring is initialized to 0. The producer must write 1 to the cycle |
179 | * bit to hand over ownership of the TRB, so PCS = 1. The consumer must |
180 | * compare CCS to the cycle bit to check ownership, so CCS = 1. | |
181 | */ | |
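/*
 * Illustrative sketch, not part of this function: a consumer would typically
 * test ownership of a TRB with a check along the lines of
 *
 *	(trb->generic.field[3] & TRB_CYCLE) == ring->cycle_state
 *
 * where TRB_CYCLE is the cycle bit in the TRB control word.  The exact field
 * name used here is an example only; see the TRB definitions in xhci.h.
 */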
182 | ring->cycle_state = 1; | |
183 | ||
184 | return ring; | |
185 | ||
186 | fail: | |
187 | xhci_ring_free(xhci, ring); | |
188 | return NULL; |
189 | } | |
190 | ||
66d4eadd SS |
191 | void xhci_mem_cleanup(struct xhci_hcd *xhci) |
192 | { | |
0ebbab37 SS |
193 | struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); |
194 | int size; | |
195 | ||
196 | /* XXX: Free all the segments in the various rings */ | |
197 | ||
198 | /* Free the Event Ring Segment Table and the actual Event Ring */ | |
199 | xhci_writel(xhci, 0, &xhci->ir_set->erst_size); | |
200 | xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]); | |
201 | xhci_writel(xhci, 0, &xhci->ir_set->erst_base[0]); | |
202 | xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[1]); | |
203 | xhci_writel(xhci, 0, &xhci->ir_set->erst_dequeue[0]); | |
204 | size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); | |
205 | if (xhci->erst.entries) | |
206 | pci_free_consistent(pdev, size, | |
207 | xhci->erst.entries, xhci->erst.erst_dma_addr); | |
208 | xhci->erst.entries = NULL; | |
209 | xhci_dbg(xhci, "Freed ERST\n"); | |
210 | if (xhci->event_ring) | |
211 | xhci_ring_free(xhci, xhci->event_ring); | |
212 | xhci->event_ring = NULL; | |
213 | xhci_dbg(xhci, "Freed event ring\n"); | |
214 | ||
215 | xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[1]); | |
216 | xhci_writel(xhci, 0, &xhci->op_regs->cmd_ring[0]); | |
217 | if (xhci->cmd_ring) | |
218 | xhci_ring_free(xhci, xhci->cmd_ring); | |
219 | xhci->cmd_ring = NULL; | |
220 | xhci_dbg(xhci, "Freed command ring\n"); | |
221 | if (xhci->segment_pool) | |
222 | dma_pool_destroy(xhci->segment_pool); | |
223 | xhci->segment_pool = NULL; | |
224 | xhci_dbg(xhci, "Freed segment pool\n"); | |
a74588f9 SS |
225 | xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[1]); |
226 | xhci_writel(xhci, 0, &xhci->op_regs->dcbaa_ptr[0]); | |
227 | if (xhci->dcbaa) | |
228 | pci_free_consistent(pdev, sizeof(*xhci->dcbaa), | |
229 | xhci->dcbaa, xhci->dcbaa->dma); | |
230 | xhci->dcbaa = NULL; | |
66d4eadd SS |
231 | xhci->page_size = 0; |
232 | xhci->page_shift = 0; | |
233 | } | |
234 | ||
235 | int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) | |
236 | { | |
0ebbab37 SS |
237 | dma_addr_t dma; |
238 | struct device *dev = xhci_to_hcd(xhci)->self.controller; | |
66d4eadd | 239 | unsigned int val, val2; |
0ebbab37 | 240 | struct xhci_segment *seg; |
66d4eadd SS |
241 | u32 page_size; |
242 | int i; | |
243 | ||
244 | page_size = xhci_readl(xhci, &xhci->op_regs->page_size); | |
245 | xhci_dbg(xhci, "Supported page size register = 0x%x\n", page_size); | |
246 | for (i = 0; i < 16; i++) { | |
247 | if ((0x1 & page_size) != 0) | |
248 | break; | |
249 | page_size = page_size >> 1; | |
250 | } | |
251 | if (i < 16) | |
252 | xhci_dbg(xhci, "Supported page size of %iK\n", (1 << (i+12)) / 1024); | |
253 | else | |
254 | xhci_warn(xhci, "WARN: no supported page size\n"); | |
255 | /* Use 4K pages, since that's common and the minimum the HC supports */ | |
256 | xhci->page_shift = 12; | |
257 | xhci->page_size = 1 << xhci->page_shift; | |
258 | xhci_dbg(xhci, "HCD page size set to %iK\n", xhci->page_size / 1024); | |
259 | ||
260 | /* | |
261 | * Program the Number of Device Slots Enabled field in the CONFIG | |
262 | * register with the max value of slots the HC can handle. | |
263 | */ | |
264 | val = HCS_MAX_SLOTS(xhci_readl(xhci, &xhci->cap_regs->hcs_params1)); | |
265 | xhci_dbg(xhci, "// xHC can handle at most %d device slots.\n", | |
266 | (unsigned int) val); | |
267 | val2 = xhci_readl(xhci, &xhci->op_regs->config_reg); | |
268 | val |= (val2 & ~HCS_SLOTS_MASK); | |
269 | xhci_dbg(xhci, "// Setting Max device slots reg = 0x%x.\n", | |
270 | (unsigned int) val); | |
271 | xhci_writel(xhci, val, &xhci->op_regs->config_reg); | |
272 | ||
a74588f9 SS |
273 | /* |
274 | * The device context base address array must be |
275 | * physically contiguous and 64-byte (cache line) aligned. |
276 | */ | |
277 | xhci->dcbaa = pci_alloc_consistent(to_pci_dev(dev), | |
278 | sizeof(*xhci->dcbaa), &dma); | |
279 | if (!xhci->dcbaa) | |
280 | goto fail; | |
281 | memset(xhci->dcbaa, 0, sizeof *(xhci->dcbaa)); | |
282 | xhci->dcbaa->dma = dma; | |
283 | xhci_dbg(xhci, "// Setting device context base array address to 0x%x\n", | |
284 | (u32) xhci->dcbaa->dma); |
285 | xhci_writel(xhci, (u32) 0, &xhci->op_regs->dcbaa_ptr[1]); | |
286 | xhci_writel(xhci, dma, &xhci->op_regs->dcbaa_ptr[0]); | |
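/*
 * The device context base address pointer written above is a 64-bit register
 * programmed as two 32-bit halves.  The high half is zero on the assumption
 * that the pci_alloc_consistent() buffer lies below 4GB (the default 32-bit
 * coherent DMA mask).
 */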
287 | ||
0ebbab37 SS |
288 | /* |
289 | * Initialize the ring segment pool. The ring must be a contiguous | |
290 | * structure composed of TRBs. The TRBs must be 16-byte aligned, but the |
291 | * command ring segments need to be 64-byte aligned, so pick the greater |
292 | * alignment requirement. |
293 | */ | |
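/*
 * Passing xhci->page_size as the dma_pool boundary argument below also keeps
 * a single segment from crossing a page boundary.
 */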
294 | xhci->segment_pool = dma_pool_create("xHCI ring segments", dev, | |
295 | SEGMENT_SIZE, 64, xhci->page_size); | |
296 | if (!xhci->segment_pool) | |
297 | goto fail; | |
298 | ||
299 | /* Set up the command ring to have one segment for now. */ |
300 | xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, flags); | |
301 | if (!xhci->cmd_ring) | |
302 | goto fail; | |
303 | xhci_dbg(xhci, "Allocated command ring at 0x%x\n", (unsigned int) xhci->cmd_ring); | |
304 | xhci_dbg(xhci, "First segment DMA is 0x%x\n", (unsigned int) xhci->cmd_ring->first_seg->dma); | |
305 | ||
306 | /* Set the address in the Command Ring Control register */ | |
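/*
 * The low bits of the Command Ring Control Register are not address bits:
 * bit 0 is the Ring Cycle State and the other low-order bits are
 * stop/abort/running flags, which is why the DMA address is masked with
 * CMD_RING_ADDR_MASK and the cycle state is ORed in.
 */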
307 | val = xhci_readl(xhci, &xhci->op_regs->cmd_ring[0]); | |
308 | val = (val & ~CMD_RING_ADDR_MASK) | | |
309 | (xhci->cmd_ring->first_seg->dma & CMD_RING_ADDR_MASK) | | |
310 | xhci->cmd_ring->cycle_state; | |
311 | xhci_dbg(xhci, "// Setting command ring address high bits to 0x0\n"); | |
312 | xhci_writel(xhci, (u32) 0, &xhci->op_regs->cmd_ring[1]); | |
313 | xhci_dbg(xhci, "// Setting command ring address low bits to 0x%x\n", val); | |
314 | xhci_writel(xhci, val, &xhci->op_regs->cmd_ring[0]); | |
315 | xhci_dbg_cmd_ptrs(xhci); | |
316 | ||
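/*
 * DBOFF holds the offset of the doorbell array from the base of the
 * capability registers; its low-order bits are reserved, hence DBOFF_MASK.
 */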
317 | val = xhci_readl(xhci, &xhci->cap_regs->db_off); | |
318 | val &= DBOFF_MASK; | |
319 | xhci_dbg(xhci, "// Doorbell array is located at offset 0x%x" | |
320 | " from cap regs base addr\n", val); | |
321 | xhci->dba = (void *) xhci->cap_regs + val; | |
322 | xhci_dbg_regs(xhci); | |
323 | xhci_print_run_regs(xhci); | |
324 | /* Set ir_set to interrupt register set 0 */ | |
325 | xhci->ir_set = (void *) xhci->run_regs->ir_set; | |
326 | ||
327 | /* | |
328 | * Event ring setup: Allocate a normal ring, but also set up |
329 | * the event ring segment table (ERST). Section 4.9.3. | |
330 | */ | |
331 | xhci_dbg(xhci, "// Allocating event ring\n"); | |
332 | xhci->event_ring = xhci_ring_alloc(xhci, ERST_NUM_SEGS, false, flags); | |
333 | if (!xhci->event_ring) | |
334 | goto fail; | |
335 | ||
336 | xhci->erst.entries = pci_alloc_consistent(to_pci_dev(dev), | |
337 | sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS, &dma); | |
338 | if (!xhci->erst.entries) | |
339 | goto fail; | |
340 | xhci_dbg(xhci, "// Allocated event ring segment table at 0x%x\n", dma); | |
341 | ||
342 | memset(xhci->erst.entries, 0, sizeof(struct xhci_erst_entry)*ERST_NUM_SEGS); | |
343 | xhci->erst.num_entries = ERST_NUM_SEGS; | |
344 | xhci->erst.erst_dma_addr = dma; | |
345 | xhci_dbg(xhci, "Set ERST to 0; private num segs = %i, virt addr = 0x%x, dma addr = 0x%x\n", | |
346 | xhci->erst.num_entries, | |
347 | (unsigned int) xhci->erst.entries, | |
348 | (u32) xhci->erst.erst_dma_addr); |
349 | ||
350 | /* set ring base address and size for each segment table entry */ | |
351 | for (val = 0, seg = xhci->event_ring->first_seg; val < ERST_NUM_SEGS; val++) { | |
352 | struct xhci_erst_entry *entry = &xhci->erst.entries[val]; | |
353 | entry->seg_addr[1] = 0; | |
354 | entry->seg_addr[0] = seg->dma; | |
355 | entry->seg_size = TRBS_PER_SEGMENT; | |
356 | entry->rsvd = 0; | |
357 | seg = seg->next; | |
358 | } | |
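/*
 * Each ERST entry above holds the 64-bit base address of one event ring
 * segment and its size in TRBs; the high address word is written as zero
 * under the same 32-bit DMA assumption noted for the DCBAA.
 */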
359 | ||
360 | /* set ERST count with the number of entries in the segment table */ | |
361 | val = xhci_readl(xhci, &xhci->ir_set->erst_size); | |
362 | val &= ERST_SIZE_MASK; | |
363 | val |= ERST_NUM_SEGS; | |
364 | xhci_dbg(xhci, "// Write ERST size = %i to ir_set 0 (some bits preserved)\n", | |
365 | val); | |
366 | xhci_writel(xhci, val, &xhci->ir_set->erst_size); | |
367 | ||
368 | xhci_dbg(xhci, "// Set ERST entries to point to event ring.\n"); | |
369 | /* set the segment table base address */ | |
370 | xhci_dbg(xhci, "// Set ERST base address for ir_set 0 = 0x%x\n", | |
371 | (u32) xhci->erst.erst_dma_addr); |
372 | xhci_writel(xhci, 0, &xhci->ir_set->erst_base[1]); | |
373 | val = xhci_readl(xhci, &xhci->ir_set->erst_base[0]); | |
374 | val &= ERST_PTR_MASK; | |
375 | val |= (xhci->erst.erst_dma_addr & ~ERST_PTR_MASK); | |
376 | xhci_writel(xhci, val, &xhci->ir_set->erst_base[0]); | |
377 | ||
378 | /* Set the event ring dequeue address */ | |
7f84eef0 | 379 | set_hc_event_deq(xhci); |
0ebbab37 SS |
380 | xhci_dbg(xhci, "Wrote ERST address to ir_set 0.\n"); |
381 | xhci_print_ir_set(xhci, xhci->ir_set, 0); | |
382 | ||
383 | /* | |
384 | * XXX: Might need to set the Interrupter Moderation Register to | |
385 | * something other than the default (~1ms minimum between interrupts). | |
386 | * See section 5.5.1.2. | |
387 | */ | |
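/*
 * For reference: the moderation interval field of that register counts in
 * 250 ns units, so the default value of 4000 corresponds to the ~1 ms
 * mentioned above (4000 * 250 ns = 1 ms).
 */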
66d4eadd SS |
388 | |
389 | return 0; | |
390 | fail: | |
391 | xhci_warn(xhci, "Couldn't initialize memory\n"); | |
392 | xhci_mem_cleanup(xhci); | |
393 | return -ENOMEM; | |
394 | } |